[med-svn] [cnrun] 01/03: Squashed commit of the following:

andrei zavada hmmr-guest at moszumanska.debian.org
Sat Nov 8 14:37:16 UTC 2014


This is an automated email from the git hooks/post-receive script.

hmmr-guest pushed a commit to branch master
in repository cnrun.

commit f6e0cb8e0af9fb2be37645758d5b91cfdab2f132
Author: Andrei Zavada <johnhommer at gmail.com>
Date:   Fri Nov 7 22:39:03 2014 +0200

    Squashed commit of the following:
    
    commit aea30a79afbf343d0d79dfe9bcc5e43386a91d35
    Author: Andrei Zavada <johnhommer at gmail.com>
    Date:   Fri Nov 7 00:08:03 2014 +0200
    
        packaged 2.0.0, release in sight
    
    commit ad302856736c25a2a7c5a2216b04a0d9cd8f176b
    Author: Andrei Zavada <johnhommer at gmail.com>
    Date:   Mon Oct 27 01:46:14 2014 +0200
    
        WIP (packaged)
    
    commit 9cb1548053ab70876ee688321e1f98f79d450a68
    Author: Andrei Zavada <johnhommer at gmail.com>
    Date:   Sun Oct 19 20:28:13 2014 +0300
    
        WIP
    
    commit e0d710ea10c22021f28d0ed24ba3d596d2d55d5e
    Author: andrei zavada <andrei.zavada at massivesolutions.eu>
    Date:   Wed Oct 8 20:06:45 2014 +0300
    
        WIP
    
    commit 76aa09113be458cd2bbef81ef951b4804c01d4b9
    Author: Andrei Zavada <johnhommer at gmail.com>
    Date:   Mon Oct 6 01:53:37 2014 +0300
    
        WIP
    
    commit da7b8bfe02babdd2fb4f6a94bcfd324e17da84f7
    Author: andrei zavada <andrei.zavada at massivesolutions.eu>
    Date:   Fri Oct 3 19:37:19 2014 +0300
    
        WIP
    
    commit 6301d96acc3004aa0fc00f36823c3b3deef057be
    Author: Andrei Zavada <johnhommer at gmail.com>
    Date:   Wed Oct 1 01:54:01 2014 +0300
    
        WIP (executable builds)
    
    commit f67f771f9d6028430bb46231f9ed64c6601ef45f
    Author: andrei zavada <andrei.zavada at massivesolutions.eu>
    Date:   Tue Sep 30 20:24:49 2014 +0300
    
        WIP
    
    commit c48710744d336b7bc150388d3ae158eef973be17
    Author: andrei zavada <andrei.zavada at massivesolutions.eu>
    Date:   Mon Sep 29 23:02:42 2014 +0300
    
        WIP
    
    commit 31be7fcb232dcdd980b473e5552bf03e91b519cb
    Author: andrei zavada <johnhommer at gmail.com>
    Date:   Sat Sep 27 00:05:04 2014 +0400
    
        WIP
    
    commit e8aa0a3f9beffed7cf0bba168c8790e29a0252c6
    Author: andrei zavada <johnhommer at gmail.com>
    Date:   Sat Sep 20 10:30:55 2014 +0400
    
        WIP
    
    commit cacd07f4635db84e6542cd9e518b01c311ed6afd
    Author: andrei zavada <johnhommer at gmail.com>
    Date:   Fri Sep 19 09:07:18 2014 +0400
    
        WIP (libcn mostly good)
    
    commit 5699a22a5268dd570e9a490f71c8c8bdcdc8c0b0
    Author: andrei zavada <johnhommer at gmail.com>
    Date:   Tue Jul 8 15:44:47 2014 +0300
    
        WIP
---
 .gitignore                                         |   30 -
 debian/.gitignore                                  |    1 +
 debian/README.Debian                               |   11 +-
 debian/changelog                                   |    7 +
 debian/cnrun-tools.install                         |    2 +
 debian/cnrun-tools.manpages                        |    2 +
 debian/cnrun.manpages                              |    1 -
 debian/control                                     |   68 +-
 debian/copyright                                   |    4 +-
 debian/libcnrun2-dev.install                       |    4 +
 debian/libcnrun2.install                           |    2 +
 debian/libcnrun2.symbols                           |  492 +++++++
 debian/lua-cnrun.info                              |    1 +
 debian/lua-cnrun.install                           |    2 +
 debian/lua-cnrun.lintian-overrides                 |    3 +
 debian/rules                                       |   39 +-
 debian/unwanted-files                              |    8 +
 debian/{upstream => upstream/metadata}             |    0
 debian/watch                                       |    2 +-
 .gitignore => upstream/.gitignore                  |    4 -
 upstream/COPYING                                   |  676 +---------
 upstream/ChangeLog                                 |    6 +
 upstream/INSTALL                                   |  255 +---
 upstream/Makefile.am                               |   14 +-
 upstream/README                                    |    2 +-
 upstream/configure.ac                              |   50 +-
 upstream/doc/Makefile.am                           |   18 +-
 upstream/doc/README                                |   80 +-
 upstream/doc/examples/example1.lua                 |  204 +++
 upstream/doc/examples/{ratiocoding => }/m.nml      |    0
 upstream/doc/examples/ratiocoding/ORNa.x1000.in    |  112 --
 upstream/doc/examples/ratiocoding/ORNb.x1000.in    |  112 --
 upstream/doc/examples/ratiocoding/PN.0.sxf.target  |   10 -
 upstream/doc/examples/ratiocoding/batch            |   34 -
 .../ratiocoding/rational-plot-sdf-interactive      |   31 -
 .../examples/ratiocoding/rational-plot-sdf-static  |   29 -
 .../doc/examples/ratiocoding/rational-plot-var     |   34 -
 upstream/doc/examples/ratiocoding/script           |   58 -
 upstream/doc/lua-api/.gitignore                    |    6 +
 upstream/doc/lua-api/Makefile.am                   |    4 +
 upstream/doc/lua-api/cnrun-lua-api.texi            |  533 ++++++++
 upstream/libcnrun.pc.in                            |   12 +
 upstream/{make_vesrion => make_version}            |    0
 upstream/man/cnrun.1.in                            |  311 -----
 upstream/man/varfold.1.in                          |  129 --
 upstream/src/Common.mk                             |    8 +-
 upstream/src/Makefile.am                           |   15 +-
 upstream/src/cnrun/Makefile.am                     |   20 -
 upstream/src/cnrun/completions.cc                  |  452 -------
 upstream/src/cnrun/interpreter.cc                  |  923 -------------
 upstream/src/cnrun/main.cc                         |  258 ----
 upstream/src/cnrun/runner.hh                       |  162 ---
 upstream/src/libcn/Makefile.am                     |   45 -
 upstream/src/libcn/base-neuron.hh                  |  297 ----
 upstream/src/libcn/base-synapse.hh                 |  101 --
 upstream/src/libcn/base-unit.cc                    |  681 ----------
 upstream/src/libcn/base-unit.hh                    |  293 ----
 upstream/src/libcn/hosted-attr.hh                  |   52 -
 upstream/src/libcn/hosted-neurons.cc               |  771 -----------
 upstream/src/libcn/hosted-neurons.hh               |  349 -----
 upstream/src/libcn/hosted-synapses.cc              |  358 -----
 upstream/src/libcn/hosted-synapses.hh              |  306 -----
 upstream/src/libcn/integrate-base.hh               |   58 -
 upstream/src/libcn/integrate-rk65.hh               |   51 -
 upstream/src/libcn/model-cycle.cc                  |  595 --------
 upstream/src/libcn/model-nmlio.cc                  |  487 -------
 upstream/src/libcn/model-struct.cc                 | 1416 --------------------
 upstream/src/libcn/model.hh                        |  711 ----------
 upstream/src/libcn/mx-attr.hh                      |   49 -
 upstream/src/libcn/param-unit-literals.hh          |   32 -
 upstream/src/libcn/sources.cc                      |  155 ---
 upstream/src/libcn/sources.hh                      |  171 ---
 upstream/src/libcn/standalone-attr.hh              |   50 -
 upstream/src/libcn/standalone-neurons.cc           |  440 ------
 upstream/src/libcn/standalone-neurons.hh           |  235 ----
 upstream/src/libcn/standalone-synapses.cc          |  104 --
 upstream/src/libcn/standalone-synapses.hh          |  121 --
 upstream/src/libcn/types.cc                        |  521 -------
 upstream/src/libcn/types.hh                        |  280 ----
 upstream/src/libcnrun/Makefile.am                  |   46 +
 upstream/src/libcnrun/base-neuron.hh               |  298 ++++
 upstream/src/libcnrun/base-synapse.hh              |   97 ++
 upstream/src/libcnrun/base-unit.cc                 |  666 +++++++++
 upstream/src/libcnrun/base-unit.hh                 |  293 ++++
 upstream/src/libcnrun/forward-decls.hh             |   42 +
 upstream/src/libcnrun/hosted-attr.hh               |   56 +
 upstream/src/libcnrun/hosted-neurons.cc            |  766 +++++++++++
 upstream/src/libcnrun/hosted-neurons.hh            |  358 +++++
 upstream/src/libcnrun/hosted-synapses.cc           |  351 +++++
 upstream/src/libcnrun/hosted-synapses.hh           |  318 +++++
 upstream/src/libcnrun/integrate-base.hh            |   64 +
 upstream/src/libcnrun/integrate-rk65.hh            |   59 +
 upstream/src/libcnrun/model-cycle.cc               |  561 ++++++++
 upstream/src/libcnrun/model-nmlio.cc               |  495 +++++++
 upstream/src/libcnrun/model-struct.cc              | 1042 ++++++++++++++
 upstream/src/libcnrun/model-tags.cc                |  422 ++++++
 upstream/src/libcnrun/model.hh                     |  715 ++++++++++
 upstream/src/libcnrun/mx-attr.hh                   |   59 +
 upstream/src/libcnrun/sources.cc                   |  231 ++++
 upstream/src/libcnrun/sources.hh                   |  199 +++
 upstream/src/libcnrun/standalone-attr.hh           |   56 +
 upstream/src/libcnrun/standalone-neurons.cc        |  438 ++++++
 upstream/src/libcnrun/standalone-neurons.hh        |  255 ++++
 upstream/src/libcnrun/standalone-synapses.cc       |   99 ++
 upstream/src/libcnrun/standalone-synapses.hh       |  126 ++
 upstream/src/libcnrun/types.cc                     |  524 ++++++++
 upstream/src/libcnrun/types.hh                     |  298 ++++
 upstream/src/libstilton/Makefile.am                |   33 +-
 upstream/src/libstilton/alg.hh                     |    6 +-
 upstream/src/libstilton/containers.hh              |   19 +-
 upstream/src/libstilton/exprparser.cc              |  300 -----
 upstream/src/libstilton/exprparser.hh              |   99 --
 upstream/src/libstilton/lang.hh                    |   11 +-
 .../src/libstilton/{libcommon.cc => libstilton.cc} |  124 +-
 upstream/src/libstilton/misc.hh                    |   43 +
 upstream/src/libstilton/string.hh                  |   18 +-
 upstream/src/{cnrun => lua-cnrun}/.gitignore       |    0
 upstream/src/lua-cnrun/Makefile.am                 |   25 +
 upstream/src/lua-cnrun/cnhost.hh                   |  154 +++
 upstream/src/lua-cnrun/commands.cc                 | 1039 ++++++++++++++
 upstream/src/print_version.cc                      |   26 -
 upstream/src/tools/.gitignore                      |    1 -
 upstream/src/tools/Makefile.am                     |   12 +-
 upstream/src/tools/hh-latency-estimator.cc         |  548 ++++----
 upstream/src/tools/spike2sdf.cc                    |  146 +-
 upstream/src/tools/varfold.cc                      |  718 ----------
 126 files changed, 12129 insertions(+), 14097 deletions(-)

diff --git a/.gitignore b/.gitignore
index c7569fb..5176126 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,33 +1,3 @@
 .DirIcon
 .backups
-Doxygen
-.libs
-.deps
-autom4te*
-aclocal*
-config.h
-config.h.in
-config.guess
-config.log
-config.sub
-config.status
-configure
-depcomp
-libtool
-install-sh
-ltmain.sh
-m4
-missing
-stamp-h1
-Makefile
-Makefile.in
-*.o
-*.lo
-*.la
-*.a
-*.pc
-*.gch
-cscope.*
-TAGS
-
 *~
diff --git a/debian/.gitignore b/debian/.gitignore
new file mode 100644
index 0000000..397b4a7
--- /dev/null
+++ b/debian/.gitignore
@@ -0,0 +1 @@
+*.log
diff --git a/debian/README.Debian b/debian/README.Debian
index 86f41ea..db46043 100644
--- a/debian/README.Debian
+++ b/debian/README.Debian
@@ -1,6 +1,4 @@
-For Debian, cnrun is configured --with-tools=no.  These tools are:
-
- - varfold, a matrix convolution utility;
+For Debian, cnrun is configured --with-tools=yes.  These tools are:
 
  - hh-latency-estimator, for measurement of first-spike latency
    in response to stimulation, and
@@ -8,8 +6,5 @@ For Debian, cnrun is configured --with-tools=no.  These tools are:
  - spike2sdf, for estimation of a spike density function from a record of
    spike times.
 
-These utilities were used in the original experiment
-(http://johnhommer.com/academic/code/cnrun/ratiocoding), and hence not
-deemed of general purpose enough to be included in the Debian package.
-If you believe you have a particular need for any of them, feel free
-to build cnrun from source.
+Note that this package does not supersede the cnrun 1.x package; both
+can be installed at the same time.
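
For orientation, the two tools compute roughly the following, sketched
here in Lua for illustration only. The helper names are invented, and
the hh-latency-estimator(1) and spike2sdf(1) man pages, not this sketch,
define the tools' actual options and semantics:

    -- first-spike latency: time from stimulus onset to the first
    -- spike at or after it (nil if the neuron never responds)
    local function first_spike_latency(spike_times, stimulus_onset)
       for _, ts in ipairs(spike_times) do
          if ts >= stimulus_onset then return ts - stimulus_onset end
       end
       return nil
    end

    -- spike density function: a Gaussian kernel of width sigma (ms)
    -- summed over all spike times and sampled at time t, normalised
    -- so the curve integrates to the total spike count
    local function sdf(spike_times, t, sigma)
       local acc = 0
       for _, ts in ipairs(spike_times) do
          local d = (t - ts) / sigma
          acc = acc + math.exp(-0.5 * d * d)
       end
       return acc / (sigma * math.sqrt(2 * math.pi))
    end

    local spikes = {12.3, 15.1, 40.8, 41.9, 77.0}
    print(first_spike_latency(spikes, 10.0))            --> 2.3
    for t = 0, 100, 10 do print(t, sdf(spikes, t, 5.0)) end
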
diff --git a/debian/changelog b/debian/changelog
index d33fa45..e70229a 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+cnrun (2.0.0-1) unstable; urgency=low
+
+  * New upstream version.
+  * Separate packaging of shared lib, dev files, tools and lua module.
+
+ -- Andrei Zavada <johnhommer at gmail.com>  Sat, 01 Nov 2014 02:28:00 +0200
+
 cnrun (1.1.14-1) unstable; urgency=low
 
   * New upstream version (Closes: #722760).
diff --git a/debian/cnrun-tools.install b/debian/cnrun-tools.install
new file mode 100644
index 0000000..efe9cf7
--- /dev/null
+++ b/debian/cnrun-tools.install
@@ -0,0 +1,2 @@
+usr/bin/hh-latency-estimator
+usr/bin/spike2sdf
diff --git a/debian/cnrun-tools.manpages b/debian/cnrun-tools.manpages
new file mode 100644
index 0000000..283dc89
--- /dev/null
+++ b/debian/cnrun-tools.manpages
@@ -0,0 +1,2 @@
+man/hh-latency-estimator.1
+man/spike2sdf.1
diff --git a/debian/cnrun.manpages b/debian/cnrun.manpages
deleted file mode 100644
index c3330e1..0000000
--- a/debian/cnrun.manpages
+++ /dev/null
@@ -1 +0,0 @@
-man/cnrun.1
diff --git a/debian/control b/debian/control
index f9e7d19..e0907bb 100644
--- a/debian/control
+++ b/debian/control
@@ -2,22 +2,64 @@ Source: cnrun
 Section: science
 Priority: optional
 Maintainer: Andrei Zavada <johnhommer at gmail.com>
-Build-Depends: debhelper (>= 9), dh-autoreconf, autoconf-archive, g++, libgomp1, libreadline6-dev, pkg-config, libgsl0-dev, libxml2-dev
-Standards-Version: 3.9.5
+Build-Depends: debhelper (>= 9), dh-autoreconf, autoconf-archive, texinfo,
+ libgomp1, pkg-config, libgsl0-dev, libxml2-dev,
+ liblua5.1-dev | liblua5.2-dev, lua5.1 | lua5.2
+Standards-Version: 3.9.6
 Homepage: http://johnhommer.com/academic/code/cnrun
-Vcs-Git: git://git.debian.org/git/debian-med/cnrun.git
+Vcs-Git: git://anonscm.debian.org/cnrun/cnrun.git
 Vcs-Browser: http://anonscm.debian.org/gitweb/?p=debian-med/cnrun.git;a=summary
 
-Package: cnrun
+Package: libcnrun2
 Architecture: any
+Pre-Depends: ${misc:Pre-Depends}
 Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: NeuroML-capable neuronal network simulator (shared lib)
+ CNrun is a neuronal network simulator implemented as a Lua package.
+ This package contains the shared library.
+ .
+ See the lua-cnrun package description for details.
+
+Package: libcnrun2-dev
+Section: libdevel
+Architecture: any
+Depends: libcnrun2 (= ${binary:Version}), ${misc:Depends}
+Suggests: pkg-config
+Description: NeuroML-capable neuronal network simulator (development files)
+ CNrun is a neuronal network simulator implemented as a Lua package.
+ This package contains development files.
+ .
+ See the lua-cnrun package description for details.
+
+Package: cnrun-tools
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: NeuroML-capable neuronal network simulator (tools)
+ CNrun is a neuronal network simulator implemented as a Lua package.
+ This package contains two standalone tools (hh-latency-estimator and
+ spike2sdf) that may be of interest to CNrun users.
+ .
+ See the lua-cnrun package description for details.
+
+Package: lua-cnrun
+Architecture: any
+Depends: libcnrun2, lua5.1 | lua5.2, ${misc:Depends}
 Suggests: gnuplot
-Description: NeuroML-capable neuronal network simulator
- CNrun is a neuronal network model simulator, similar in purpose to
- NEURON except that individual neurons are not compartmentalised.  It
- can read NeuroML files (e.g., as generated by neuroConstruct);
- provides a Hodgkin-Huxley neuron (plus some varieties), a Rall and
- Alpha-Beta synapses, Poisson, Van der Pol, Colpitts oscillators and
- regular pulse generator; external inputs and logging state variables.
- Uses a 6-5 Runge-Kutta integration method.  Basic scripting and (if
- run interactively) context-aware autocompletion.
+Description: NeuroML-capable neuronal network simulator (Lua package)
+ CNrun is a neuronal network simulator, with these features:
+   * conductance- and rate-based Hodgkin-Huxley neurons, plus Rall
+     and Alpha-Beta synapses;
+   * a 6-5 Runge-Kutta integration method: slow but precise, adjustable;
+   * Poisson, Van der Pol and Colpitts oscillators, and an interface
+     for external stimulation sources;
+   * NeuroML network topology import/export;
+   * logging of state variables and spikes;
+   * implementation as a Lua module, for scripting model behaviour
+     (e.g., to enable plastic processes regulated by model state);
+   * interaction (topology push/pull, async connections) with other
+     cnrun models running elsewhere on a network
+     (planned).
+ .
+ Note that there is no `cnrun' executable as there was in cnrun-1.*.
+ Instead, you write a script for your simulation in Lua and execute
+ it as shown in /usr/share/cnrun/examples/example1.lua.
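
The Lua API itself is documented in upstream/doc/lua-api/cnrun-lua-api.texi
and demonstrated by example1.lua, both added by this commit. As a loose,
hypothetical sketch of that scripted workflow (module and method names
below are invented for illustration, not the actual API; defer to
example1.lua):

    -- hypothetical lua-cnrun driver script; names are illustrative
    -- only -- see example1.lua for the real entry points
    local cnrun = require("cnrun")          -- assumed module name

    local model = cnrun.new_model("demo")   -- hypothetical constructor
    model:import_nml("topology.nml")        -- hypothetical NeuroML import
    model:advance(1000.0)                   -- hypothetical: integrate 1000 ms

A script of this shape would be run with the packaged interpreter,
e.g. lua5.1 simulation.lua, per the lua5.1 | lua5.2 dependency above.
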
diff --git a/debian/copyright b/debian/copyright
index a67a602..7eaf1af 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -4,10 +4,10 @@ Upstream-Contact: Andrei Zavada <johnhommer at gmail.com>
 Source: http://johnhommer.com/academic/code/cnrun/source/
 
 Files: *
-Copyright: 2008-2012 Andrei Zavada <johnhommer at gmail.com>
+Copyright: 2008-2014 Andrei Zavada <johnhommer at gmail.com>
 License: GPL-2+
 
-Files: src/libcn/*.cc
+Files: src/libcnrun/*.cc
 Copyright: 2008 Thomas Nowotny <t.nowotny at sussex.ac.uk>
 	   2008-2012 Andrei Zavada <johnhommer at gmail.com>
 License: GPL-2+
diff --git a/debian/libcnrun2-dev.install b/debian/libcnrun2-dev.install
new file mode 100644
index 0000000..23e2166
--- /dev/null
+++ b/debian/libcnrun2-dev.install
@@ -0,0 +1,4 @@
+usr/include/libcnrun/*
+usr/include/libstilton/*
+usr/lib/*/libcnrun.so
+usr/lib/*/pkgconfig/*
diff --git a/debian/libcnrun2.install b/debian/libcnrun2.install
new file mode 100644
index 0000000..480d5e7
--- /dev/null
+++ b/debian/libcnrun2.install
@@ -0,0 +1,2 @@
+usr/lib/*/libcnrun.so.2.0.0
+usr/lib/*/libcnrun.so.2
diff --git a/debian/libcnrun2.symbols b/debian/libcnrun2.symbols
new file mode 100644
index 0000000..cc04aeb
--- /dev/null
+++ b/debian/libcnrun2.symbols
@@ -0,0 +1,492 @@
+libcnrun.so.2 libcnrun2 #MINVER#
+(c++)"cnrun::CNeuronMap::preadvance()@Base" 2.0.0
+(c++)"cnrun::CNeuronMap::CNeuronMap(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::CNeuronMap::CNeuronMap(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::CNeuronMap::~CNeuronMap()@Base" 2.0.0
+(c++)"cnrun::CNeuronMap::~CNeuronMap()@Base" 2.0.0
+(c++)"cnrun::CNeuronMap::~CNeuronMap()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::reset_state()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::detach_source(cnrun::C_BaseSource*, cnrun::C_BaseUnit::TSinkType, unsigned long)@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::stop_listening()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::start_listening(int)@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::param_changed_hook()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::apprise_from_sources()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::tell()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::C_BaseUnit(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::C_BaseUnit(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::~C_BaseUnit()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::~C_BaseUnit()@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::~C_BaseUnit()@Base" 2.0.0
+(c++)"cnrun::CNeuronEC_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CNeuronEC_d::~CNeuronEC_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronEC_d::~CNeuronEC_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronEC_d::~CNeuronEC_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_d::~CNeuronHH_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_d::~CNeuronHH_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_d::~CNeuronHH_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_r::preadvance()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_r::~CNeuronHH_r()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_r::~CNeuronHH_r()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH_r::~CNeuronHH_r()@Base" 2.0.0
+(c++)"cnrun::CSourceTape::CSourceTape(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TSourceLoopingOption)@Base" 2.0.0
+(c++)"cnrun::CSourceTape::CSourceTape(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TSourceLoopingOption)@Base" 2.0.0
+(c++)"cnrun::CSourceTape::~CSourceTape()@Base" 2.0.0
+(c++)"cnrun::CSourceTape::~CSourceTape()@Base" 2.0.0
+(c++)"cnrun::CSourceTape::~CSourceTape()@Base" 2.0.0
+(c++)"cnrun::CSourceTape::operator()(double)@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::preadvance()@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::CSynapseMap(cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int, cnrun::TUnitType)@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::CSynapseMap(cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int, cnrun::TUnitType)@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::~CSynapseMap()@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::~CSynapseMap()@Base" 2.0.0
+(c++)"cnrun::CSynapseMap::~CSynapseMap()@Base" 2.0.0
+(c++)"cnrun::CNeuronECA_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CNeuronECA_d::~CNeuronECA_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronECA_d::~CNeuronECA_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronECA_d::~CNeuronECA_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH2_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CNeuronHH2_d::~CNeuronHH2_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH2_d::~CNeuronHH2_d()@Base" 2.0.0
+(c++)"cnrun::CNeuronHH2_d::~CNeuronHH2_d()@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::distribution_s(cnrun::CSourceNoise::TDistribution)@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::distribution_by_name(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::CSourceNoise(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CSourceNoise::TDistribution, int)@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::CSourceNoise(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CSourceNoise::TDistribution, int)@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::~CSourceNoise()@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::~CSourceNoise()@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::~CSourceNoise()@Base" 2.0.0
+(c++)"cnrun::CSourceNoise::operator()(double)@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::reset_state()@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::possibly_fire()@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::do_detect_spike_or_whatever()@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::~C_BaseNeuron()@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::~C_BaseNeuron()@Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::~C_BaseNeuron()@Base" 2.0.0
+(c++)"cnrun::C_BaseSource::is_periodic()@Base" 2.0.0
+(c++)"cnrun::C_BaseSource::type_s(cnrun::TSourceType)@Base" 2.0.0
+(c++)"cnrun::C_BaseSource::operator()(double)@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::~CSynapseAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::~CSynapseAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::~CSynapseAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::param_changed_hook()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::~CSynapseAB_rr()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::~CSynapseAB_rr()@Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::~CSynapseAB_rr()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxMap::preadvance()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxMap::update_queue()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxMap::~CSynapseMxMap()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxMap::~CSynapseMxMap()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxMap::~CSynapseMxMap()@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::reset_state()@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::update_queue()@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::clone_to_target(cnrun::C_BaseNeuron*, double)@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::make_clone_independent(cnrun::C_BaseNeuron*)@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::C_BaseSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::C_BaseSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::~C_BaseSynapse()@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::~C_BaseSynapse()@Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::~C_BaseSynapse()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::cycle()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::fixate()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::prepare()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::~CIntegrateRK65()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::~CIntegrateRK65()@Base" 2.0.0
+(c++)"cnrun::CIntegrateRK65::~CIntegrateRK65()@Base" 2.0.0
+(c++)"cnrun::C_HostedNeuron::reset_vars()@Base" 2.0.0
+(c++)"cnrun::C_HostedNeuron::var_value(unsigned long)@Base" 2.0.0
+(c++)"cnrun::C_HostedNeuron::C_HostedNeuron(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::C_HostedNeuron::C_HostedNeuron(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::CNeuronDotPulse::possibly_fire()@Base" 2.0.0
+(c++)"cnrun::CNeuronDotPulse::param_changed_hook()@Base" 2.0.0
+(c++)"cnrun::CNeuronDotPulse::~CNeuronDotPulse()@Base" 2.0.0
+(c++)"cnrun::CNeuronDotPulse::~CNeuronDotPulse()@Base" 2.0.0
+(c++)"cnrun::CNeuronDotPulse::~CNeuronDotPulse()@Base" 2.0.0
+(c++)"cnrun::CSourceFunction::~CSourceFunction()@Base" 2.0.0
+(c++)"cnrun::CSourceFunction::~CSourceFunction()@Base" 2.0.0
+(c++)"cnrun::CSourceFunction::~CSourceFunction()@Base" 2.0.0
+(c++)"cnrun::CSourceFunction::operator()(double)@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::is_periodic()@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::CSourcePeriodic(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TSourceLoopingOption, double)@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::CSourcePeriodic(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TSourceLoopingOption, double)@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::~CSourcePeriodic()@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::~CSourcePeriodic()@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::~CSourcePeriodic()@Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::operator()(double)@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::reset_state()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::update_queue()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::~CSynapseMxAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::~CSynapseMxAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dd::~CSynapseMxAB_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dr::~CSynapseMxAB_dr()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dr::~CSynapseMxAB_dr()@Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dr::~CSynapseMxAB_dr()@Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::~CSynapseRall_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::~CSynapseRall_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::~CSynapseRall_dd()@Base" 2.0.0
+(c++)"cnrun::C_HostedSynapse::reset_vars()@Base" 2.0.0
+(c++)"cnrun::C_HostedSynapse::var_value(unsigned long)@Base" 2.0.0
+(c++)"cnrun::C_HostedSynapse::C_HostedSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::C_HostedSynapse::C_HostedSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::COscillatorVdPol::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::COscillatorVdPol::~COscillatorVdPol()@Base" 2.0.0
+(c++)"cnrun::COscillatorVdPol::~COscillatorVdPol()@Base" 2.0.0
+(c++)"cnrun::COscillatorVdPol::~COscillatorVdPol()@Base" 2.0.0
+(c++)"cnrun::COscillatorPoisson::possibly_fire()@Base" 2.0.0
+(c++)"cnrun::COscillatorPoisson::do_detect_spike_or_whatever()@Base" 2.0.0
+(c++)"cnrun::COscillatorPoisson::~COscillatorPoisson()@Base" 2.0.0
+(c++)"cnrun::COscillatorPoisson::~COscillatorPoisson()@Base" 2.0.0
+(c++)"cnrun::COscillatorPoisson::~COscillatorPoisson()@Base" 2.0.0
+(c++)"cnrun::CSynapseABMinus_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::CSynapseABMinus_dd::~CSynapseABMinus_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseABMinus_dd::~CSynapseABMinus_dd()@Base" 2.0.0
+(c++)"cnrun::CSynapseABMinus_dd::~CSynapseABMinus_dd()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::reset_vars()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::var_value(unsigned long)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::C_StandaloneNeuron(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::C_StandaloneNeuron(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, double, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::~C_StandaloneNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::~C_StandaloneNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::~C_StandaloneNeuron()@Base" 2.0.0
+(c++)"cnrun::COscillatorColpitts::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"cnrun::COscillatorColpitts::~COscillatorColpitts()@Base" 2.0.0
+(c++)"cnrun::COscillatorColpitts::~COscillatorColpitts()@Base" 2.0.0
+(c++)"cnrun::COscillatorColpitts::~COscillatorColpitts()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneSynapse::reset_vars()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneSynapse::var_value(unsigned long)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneSynapse::C_StandaloneSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneSynapse::C_StandaloneSynapse(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel*, int)@Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_Vars_SynapseAB at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::COscillatorDotPoisson::possibly_fire()@Base" 2.0.0
+(c++)"cnrun::COscillatorDotPoisson::do_detect_spike_or_whatever()@Base" 2.0.0
+(c++)"cnrun::COscillatorDotPoisson::~COscillatorDotPoisson()@Base" 2.0.0
+(c++)"cnrun::COscillatorDotPoisson::~COscillatorDotPoisson()@Base" 2.0.0
+(c++)"cnrun::COscillatorDotPoisson::~COscillatorDotPoisson()@Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronHH2_d at Base" 2.0.0
+(c++)"cnrun::CN_Vars_SynapseRall at Base" 2.0.0
+(c++)"cnrun::unit_family_by_string(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::C_StandaloneAttributes::preadvance()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneAttributes::~C_StandaloneAttributes()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneAttributes::~C_StandaloneAttributes()@Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseMap at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_SynapseAB at Base" 2.0.0
+(c++)"cnrun::unit_species_by_string(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronHH2_d at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_SynapseAB at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseAB_dd at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseAB_rr at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_SynapseRall at Base" 2.0.0
+(c++)"cnrun::CN_Vars_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronMap at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_SynapseMap at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_SynapseRall at Base" 2.0.0
+(c++)"cnrun::CN_Vars_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronEC_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronHH_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronHH_r at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_SynapseMap at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronHH2_d at Base" 2.0.0
+(c++)"cnrun::CN_Params_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseMxAB_dd at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseMxAB_dr at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseRall_dd at Base" 2.0.0
+(c++)"cnrun::C_StandaloneRateBasedNeuron::~C_StandaloneRateBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneRateBasedNeuron::~C_StandaloneRateBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneRateBasedNeuron::~C_StandaloneRateBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronECA_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronHH2_d at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_SynapseAB_dd at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_SynapseAB_dr at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_SynapseAB_rr at Base" 2.0.0
+(c++)"cnrun::CN_Params_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_Vars_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_SynapseAB_dd at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_SynapseAB_dr at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_SynapseAB_rr at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::CN_Vars_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::cnmodel_dump_available_units()@Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_SynapseRall_dd at Base" 2.0.0
+(c++)"cnrun::CN_Params_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_Params_SynapseABMinus_dd at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::C_HostedConductanceBasedNeuron::do_detect_spike_or_whatever()@Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_NeuronDotPulse at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_SynapseRall_dd at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::CN_Params_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_Vars_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_OscillatorVdPol at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_Params_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_OscillatorPoisson at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::CN_VarSyms_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::~C_StandaloneConductanceBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::~C_StandaloneConductanceBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::~C_StandaloneConductanceBasedNeuron()@Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_OscillatorColpitts at Base" 2.0.0
+(c++)"cnrun::CN_VarNames_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::CN_ParamSyms_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::CN_ParamNames_OscillatorPoissonDot at Base" 2.0.0
+(c++)"cnrun::CModel::exclude_unit(cnrun::C_BaseUnit*, cnrun::CModel::TExcludeOption)@Base" 2.0.0
+(c++)"cnrun::CModel::include_unit(cnrun::C_HostedNeuron*, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::CModel::include_unit(cnrun::C_HostedSynapse*, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::CModel::include_unit(cnrun::C_StandaloneNeuron*)@Base" 2.0.0
+(c++)"cnrun::CModel::include_unit(cnrun::C_StandaloneSynapse*)@Base" 2.0.0
+(c++)"cnrun::CModel::prepare_advance()@Base" 2.0.0
+(c++)"cnrun::CModel::export_NetworkML(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::import_NetworkML(_xmlDoc*, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CModel::TNMLImportOption)@Base" 2.0.0
+(c++)"cnrun::CModel::import_NetworkML(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CModel::TNMLImportOption)@Base" 2.0.0
+(c++)"cnrun::CModel::_setup_schedulers()@Base" 2.0.0
+(c++)"cnrun::CModel::coalesce_synapses()@Base" 2.0.0
+(c++)"cnrun::CModel::register_listener(cnrun::C_BaseUnit*)@Base" 2.0.0
+(c++)"cnrun::CModel::_include_base_unit(cnrun::C_BaseUnit*)@Base" 2.0.0
+(c++)"cnrun::CModel::add_neuron_species(cnrun::TUnitType, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TIncludeOption, double, double, double)@Base" 2.0.0
+(c++)"cnrun::CModel::add_neuron_species(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::TIncludeOption, double, double, double)@Base" 2.0.0
+(c++)"cnrun::CModel::cull_deaf_synapses()@Base" 2.0.0
+(c++)"cnrun::CModel::finalize_additions()@Base" 2.0.0
+(c++)"cnrun::CModel::add_synapse_species(cnrun::TUnitType, cnrun::C_BaseNeuron*, cnrun::C_BaseNeuron*, double, cnrun::CModel::TSynapseCloningOption, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::CModel::add_synapse_species(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, double, cnrun::CModel::TSynapseCloningOption, cnrun::TIncludeOption)@Base" 2.0.0
+(c++)"cnrun::CModel::cull_blind_synapses()@Base" 2.0.0
+(c++)"cnrun::CModel::process_putout_tags(std::list<cnrun::CModel::STagGroup, std::allocator<cnrun::CModel::STagGroup> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::unregister_listener(cnrun::C_BaseUnit*)@Base" 2.0.0
+(c++)"cnrun::CModel::_do_advance_on_mixed(double, double*)@Base" 2.0.0
+(c++)"cnrun::CModel::_process_populations(_xmlNode*)@Base" 2.0.0
+(c++)"cnrun::CModel::_process_projections(_xmlNode*)@Base" 2.0.0
+(c++)"cnrun::CModel::register_spikelogger(cnrun::C_BaseNeuron*)@Base" 2.0.0
+(c++)"cnrun::CModel::process_decimate_tags(std::list<cnrun::CModel::STagGroupDecimate, std::allocator<cnrun::CModel::STagGroupDecimate> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::process_listener_tags(std::list<cnrun::CModel::STagGroupListener, std::allocator<cnrun::CModel::STagGroupListener> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::reset_state_all_units()@Base" 2.0.0
+(c++)"cnrun::CModel::unregister_spikelogger(cnrun::C_BaseNeuron*)@Base" 2.0.0
+(c++)"cnrun::CModel::process_spikelogger_tags(std::list<cnrun::CModel::STagGroupSpikelogger, std::allocator<cnrun::CModel::STagGroupSpikelogger> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::_do_advance_on_pure_hosted(double, double*)@Base" 2.0.0
+(c++)"cnrun::CModel::register_unit_with_sources(cnrun::C_BaseUnit*)@Base" 2.0.0
+(c++)"cnrun::CModel::_do_advance_on_pure_ddtbound(double, double*)@Base" 2.0.0
+(c++)"cnrun::CModel::process_paramset_source_tags(std::list<cnrun::CModel::STagGroupSource, std::allocator<cnrun::CModel::STagGroupSource> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::process_paramset_static_tags(std::list<cnrun::CModel::STagGroupNeuronParmSet, std::allocator<cnrun::CModel::STagGroupNeuronParmSet> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::process_paramset_static_tags(std::list<cnrun::CModel::STagGroupSynapseParmSet, std::allocator<cnrun::CModel::STagGroupSynapseParmSet> > const&)@Base" 2.0.0
+(c++)"cnrun::CModel::unregister_unit_with_sources(cnrun::C_BaseUnit*)@Base" 2.0.0
+(c++)"cnrun::CModel::_process_population_instances(_xmlNode*, unsigned char const*, unsigned char const*)@Base" 2.0.0
+(c++)"cnrun::CModel::_do_advance_on_pure_standalone(double, double*)@Base" 2.0.0
+(c++)"cnrun::CModel::_process_projection_connections(_xmlNode*, unsigned char const*, unsigned char const*, unsigned char const*, unsigned char const*)@Base" 2.0.0
+(c++)"cnrun::CModel::reset(cnrun::CModel::TResetOption)@Base" 2.0.0
+(c++)"cnrun::CModel::advance(double, double*)@Base" 2.0.0
+(c++)"cnrun::CModel::CModel(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CIntegrate_base*, cnrun::SModelOptions const&)@Base" 2.0.0
+(c++)"cnrun::CModel::CModel(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, cnrun::CIntegrate_base*, cnrun::SModelOptions const&)@Base" 2.0.0
+(c++)"cnrun::CModel::~CModel()@Base" 2.0.0
+(c++)"cnrun::CModel::~CModel()@Base" 2.0.0
+(c++)"cnrun::CModel::~CModel()@Base" 2.0.0
+(c++)"cnrun::global::precision at Base" 2.0.0
+(c++)"cnrun::global::verbosely at Base" 2.0.0
+(c++)"cnrun::__CNUDT at Base" 2.0.0
+(c++)"cnrun::stilton::str::dhms_colon(double, int)@Base" 2.0.0
+(c++)"cnrun::stilton::str::svasprintf(char const*, __va_list_tag*)@Base" 2.0.0
+(c++)"cnrun::stilton::str::homedir2tilda(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::stilton::str::homedir2tilda(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&)@Base" 2.0.0
+(c++)"cnrun::stilton::str::tilda2homedir(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::stilton::str::tilda2homedir(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&)@Base" 2.0.0
+(c++)"cnrun::stilton::str::tokens_trimmed(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, char const*)@Base" 2.0.0
+(c++)"cnrun::stilton::str::decompose_double(double, double*, int*)@Base" 2.0.0
+(c++)"cnrun::stilton::str::double_dot_aligned_s(double, int, int)@Base" 2.0.0
+(c++)"cnrun::stilton::str::pad(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long)@Base" 2.0.0
+(c++)"cnrun::stilton::str::dhms(double, int)@Base" 2.0.0
+(c++)"std::basic_string<char, std::char_traits<char>, std::allocator<char> > cnrun::stilton::str::join<std::list<double, std::allocator<double> > >(std::list<double, std::allocator<double> > const&, char const*)@Base" 2.0.0
+(c++)"std::basic_string<char, std::char_traits<char>, std::allocator<char> > cnrun::stilton::str::join<std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > > >(std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, char const*)@Base" 2.0.0
+(c++)"cnrun::stilton::str::trim(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)@Base" 2.0.0
+(c++)"cnrun::stilton::str::tokens(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, char const*)@Base" 2.0.0
+(c++)"cnrun::stilton::str::sasprintf(char const*, ...)@Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::var_idx_by_sym(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::param_idx_by_sym(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::C_BaseUnit::dump(bool, _IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CNeuronHH_r::F(std::vector<double, std::allocator<double> >&) const at Base" 2.0.0
+(c++)"cnrun::CSourceTape::dump(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CSynapseMap::Isyn(cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseMap::Isyn(std::vector<double, std::allocator<double> >&, cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSourceNoise::dump(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::connects_to(cnrun::C_BaseNeuron const&) const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::connects_via(cnrun::C_BaseNeuron const&, double*) const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::n_spikes_in_last_dt() const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::E(std::vector<double, std::allocator<double> >&) const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::E() const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::F(std::vector<double, std::allocator<double> >&) const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::F() const at Base" 2.0.0
+(c++)"cnrun::C_BaseNeuron::dump(bool, _IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::Isyn(cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseAB_dd::Isyn(std::vector<double, std::allocator<double> >&, cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::Isyn(cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseAB_rr::Isyn(std::vector<double, std::allocator<double> >&, cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::C_BaseSynapse::dump(bool, _IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::C_HostedNeuron::get_var_value(unsigned long) const at Base" 2.0.0
+(c++)"cnrun::CSourceFunction::dump(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CSourcePeriodic::dump(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dr::Isyn(cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseMxAB_dr::Isyn(std::vector<double, std::allocator<double> >&, cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::Isyn(cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::CSynapseRall_dd::Isyn(std::vector<double, std::allocator<double> >&, cnrun::C_BaseNeuron const&, double) const at Base" 2.0.0
+(c++)"cnrun::C_HostedSynapse::get_var_value(unsigned long) const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneNeuron::get_var_value(unsigned long) const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneSynapse::get_var_value(unsigned long) const at Base" 2.0.0
+(c++)"cnrun::SSpikeloggerService::sync_history() const at Base" 2.0.0
+(c++)"cnrun::SSpikeloggerService::n_spikes_since(double) const at Base" 2.0.0
+(c++)"cnrun::SSpikeloggerService::get_sxf_vector_custom(std::vector<double, std::allocator<double> >*, std::vector<double, std::allocator<double> >*, std::vector<unsigned long, std::allocator<unsigned long> >*, double, double, double, double) const at Base" 2.0.0
+(c++)"cnrun::SSpikeloggerService::sdf(double, double, double, unsigned long*) const at Base" 2.0.0
+(c++)"cnrun::SSpikeloggerService::shf(double, double) const at Base" 2.0.0
+(c++)"cnrun::C_HostedRateBasedNeuron::n_spikes_in_last_dt() const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneRateBasedNeuron::n_spikes_in_last_dt() const at Base" 2.0.0
+(c++)"cnrun::C_HostedConductanceBasedNeuron::n_spikes_in_last_dt() const at Base" 2.0.0
+(c++)"cnrun::C_HostedConductanceBasedNeuron::E(std::vector<double, std::allocator<double> >&) const at Base" 2.0.0
+(c++)"cnrun::C_HostedConductanceBasedNeuron::E() const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::n_spikes_in_last_dt() const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::E(std::vector<double, std::allocator<double> >&) const at Base" 2.0.0
+(c++)"cnrun::C_StandaloneConductanceBasedNeuron::E() const at Base" 2.0.0
+(c++)"cnrun::CModel::dump_state(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CModel::dump_units(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CModel::list_units(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::CModel::dump_metrics(_IO_FILE*) const at Base" 2.0.0
+(c++)"cnrun::CModel::unit_by_label(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::CModel::neuron_by_label(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::CModel::synapse_by_label(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) const at Base" 2.0.0
+(c++)"cnrun::CModel::verbose_threshold() const at Base" 2.0.0
+(c++)"cnrun::stilton::C_verprintf::vp(int, _IO_FILE*, char const*, ...) const at Base" 2.0.0
+(c++)"cnrun::stilton::C_verprintf::vp(int, char const*, ...) const at Base" 2.0.0
+(c++)"std::ctype<char>::do_widen(char) const at Base" 2.0.0
+(c++)"std::_Rb_tree<int, int, std::_Identity<int>, std::less<int>, std::allocator<int> >::find(int const&) const at Base" 2.0.0
+(c++)"char* std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_S_construct<char*>(char*, char*, std::allocator<char> const&, std::forward_iterator_tag)@Base" 2.0.0
+(c++)"std::_List_base<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::_M_clear()@Base" 2.0.0
+(c++)"std::list<cnrun::C_BaseUnit*, std::allocator<cnrun::C_BaseUnit*> >::remove(cnrun::C_BaseUnit* const&)@Base" 2.0.0
+(c++)"std::list<cnrun::C_BaseUnit*, std::allocator<cnrun::C_BaseUnit*> >::unique()@Base" 2.0.0
+(c++)"std::list<cnrun::C_BaseNeuron*, std::allocator<cnrun::C_BaseNeuron*> >::sort()@Base" 2.0.0
+(c++)"std::list<cnrun::C_BaseNeuron*, std::allocator<cnrun::C_BaseNeuron*> >::merge(std::list<cnrun::C_BaseNeuron*, std::allocator<cnrun::C_BaseNeuron*> >&&)@Base" 2.0.0
+(c++)"std::list<cnrun::C_BaseNeuron*, std::allocator<cnrun::C_BaseNeuron*> >::remove(cnrun::C_BaseNeuron* const&)@Base" 2.0.0
+(c++)"std::list<double, std::allocator<double> >::sort()@Base" 2.0.0
+(c++)"std::list<double, std::allocator<double> >::merge(std::list<double, std::allocator<double> >&&)@Base" 2.0.0
+(c++)"std::list<unsigned int, std::allocator<unsigned int> >::_M_default_append(unsigned long)@Base" 2.0.0
+(c++)"void std::vector<cnrun::C_BaseUnit*, std::allocator<cnrun::C_BaseUnit*> >::_M_emplace_back_aux<cnrun::C_BaseUnit* const&>(cnrun::C_BaseUnit* const&)@Base" 2.0.0
+(c++)"void std::vector<cnrun::C_BaseUnit*, std::allocator<cnrun::C_BaseUnit*> >::_M_emplace_back_aux<cnrun::C_BaseUnit* const&>(cnrun::C_BaseUnit* const&)@Base" 2.0.0
+(c++)"void std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::emplace_back<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&&)@Base" 2.0.0
+(c++)"void std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::emplace_back<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&&)@Base" 2.0.0
+(c++)"void std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::_M_emplace_back_aux<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&&)@Base" 2.0.0
+(c++)"void std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::_M_emplace_back_aux<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >(std::basic_string<char, std::char_traits<char>, std::allocator<char> >&&)@Base" 2.0.0
+(c++)"std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::~vector()@Base" 2.0.0
+(c++)"std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::~vector()@Base" 2.0.0
+(c++)"void std::vector<std::pair<double, double>, std::allocator<std::pair<double, double> > >::_M_emplace_back_aux<std::pair<double, double> >(std::pair<double, double>&&)@Base" 2.0.0
+(c++)"void std::vector<std::pair<double, double>, std::allocator<std::pair<double, double> > >::_M_emplace_back_aux<std::pair<double, double> >(std::pair<double, double>&&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::emplace_back<double>(double&&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::emplace_back<double>(double&&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::_M_emplace_back_aux<double const&>(double const&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::_M_emplace_back_aux<double>(double&&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::_M_emplace_back_aux<double const&>(double const&)@Base" 2.0.0
+(c++)"void std::vector<double, std::allocator<double> >::_M_emplace_back_aux<double>(double&&)@Base" 2.0.0
+(c++)"std::vector<double, std::allocator<double> >::operator=(std::vector<double, std::allocator<double> > const&)@Base" 2.0.0
+(c++)"void std::vector<unsigned long, std::allocator<unsigned long> >::_M_emplace_back_aux<unsigned long const&>(unsigned long const&)@Base" 2.0.0
+(c++)"void std::vector<unsigned long, std::allocator<unsigned long> >::_M_emplace_back_aux<unsigned long const&>(unsigned long const&)@Base" 2.0.0
+(c++)"std::_Rb_tree<cnrun::C_BaseSynapse*, std::pair<cnrun::C_BaseSynapse* const, double>, std::_Select1st<std::pair<cnrun::C_BaseSynapse* const, double> >, std::less<cnrun::C_BaseSynapse*>, std::allocator<std::pair<cnrun::C_BaseSynapse* const, double> > >::_M_get_insert_unique_pos(cnrun::C_BaseSynapse* const&)@Base" 2.0.0
+(c++)"std::_Rb_tree<cnrun::C_BaseSynapse*, std::pair<cnrun::C_BaseSynapse* const, double>, std::_Select1st<std::pair<cnrun::C_BaseSynapse* const, double> >, std::less<cnrun::C_BaseSynapse*>, std::allocator<std::pair<cnrun::C_BaseSynapse* const, double> > >::_M_get_insert_hint_unique_pos(std::_Rb_tree_const_iterator<std::pair<cnrun::C_BaseSynapse* const, double> >, cnrun::C_BaseSynapse* const&)@Base" 2.0.0
+(c++)"std::_Rb_tree<cnrun::C_BaseSynapse*, std::pair<cnrun::C_BaseSynapse* const, double>, std::_Select1st<std::pair<cnrun::C_BaseSynapse* const, double> >, std::less<cnrun::C_BaseSynapse*>, std::allocator<std::pair<cnrun::C_BaseSynapse* const, double> > >::erase(cnrun::C_BaseSynapse* const&)@Base" 2.0.0
+(c++)"std::_Rb_tree<cnrun::C_BaseSynapse*, std::pair<cnrun::C_BaseSynapse* const, double>, std::_Select1st<std::pair<cnrun::C_BaseSynapse* const, double> >, std::less<cnrun::C_BaseSynapse*>, std::allocator<std::pair<cnrun::C_BaseSynapse* const, double> > >::_M_erase(std::_Rb_tree_node<std::pair<cnrun::C_BaseSynapse* const, double> >*)@Base" 2.0.0
+(c++)"std::pair<std::_Rb_tree_iterator<int>, bool> std::_Rb_tree<int, int, std::_Identity<int>, std::less<int>, std::allocator<int> >::_M_insert_unique<int>(int&&)@Base" 2.0.0
+(c++)"std::_Rb_tree<int, int, std::_Identity<int>, std::less<int>, std::allocator<int> >::_M_erase(std::_Rb_tree_node<int>*)@Base" 2.0.0
+(c++)"typeinfo for cnrun::CModel::TNMLIOResult at Base" 2.0.0
+(c++)"typeinfo for std::basic_string<char, std::char_traits<char>, std::allocator<char> >@Base" 2.0.0
+(c++)"typeinfo name for cnrun::CModel::TNMLIOResult at Base" 2.0.0
+(c++)"typeinfo name for std::basic_string<char, std::char_traits<char>, std::allocator<char> >@Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronMap at Base" 2.0.0
+(c++)"vtable for cnrun::C_BaseUnit at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronEC_d at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronHH_d at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronHH_r at Base" 2.0.0
+(c++)"vtable for cnrun::CSourceTape at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseMap at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronECA_d at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronHH2_d at Base" 2.0.0
+(c++)"vtable for cnrun::CSourceNoise at Base" 2.0.0
+(c++)"vtable for cnrun::C_BaseNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::C_BaseSource at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseAB_dd at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseAB_rr at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseMxMap at Base" 2.0.0
+(c++)"vtable for cnrun::C_BaseSynapse at Base" 2.0.0
+(c++)"vtable for cnrun::CIntegrateRK65 at Base" 2.0.0
+(c++)"vtable for cnrun::C_HostedNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::CIntegrate_base at Base" 2.0.0
+(c++)"vtable for cnrun::CNeuronDotPulse at Base" 2.0.0
+(c++)"vtable for cnrun::CSourceFunction at Base" 2.0.0
+(c++)"vtable for cnrun::CSourcePeriodic at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseMxAB_dd at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseMxAB_dr at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseRall_dd at Base" 2.0.0
+(c++)"vtable for cnrun::C_HostedSynapse at Base" 2.0.0
+(c++)"vtable for cnrun::COscillatorVdPol at Base" 2.0.0
+(c++)"vtable for cnrun::COscillatorPoisson at Base" 2.0.0
+(c++)"vtable for cnrun::CSynapseABMinus_dd at Base" 2.0.0
+(c++)"vtable for cnrun::C_HostedAttributes at Base" 2.0.0
+(c++)"vtable for cnrun::C_StandaloneNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::COscillatorColpitts at Base" 2.0.0
+(c++)"vtable for cnrun::C_StandaloneSynapse at Base" 2.0.0
+(c++)"vtable for cnrun::COscillatorDotPoisson at Base" 2.0.0
+(c++)"vtable for cnrun::C_StandaloneAttributes at Base" 2.0.0
+(c++)"vtable for cnrun::C_HostedRateBasedNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::C_MultiplexingAttributes at Base" 2.0.0
+(c++)"vtable for cnrun::C_StandaloneRateBasedNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::C_HostedConductanceBasedNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::C_StandaloneConductanceBasedNeuron at Base" 2.0.0
+(c++)"vtable for cnrun::CModel at Base" 2.0.0
+(c++)"vtable for cnrun::stilton::C_verprintf at Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseMap::preadvance()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseAB_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseAB_rr::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseMxMap::preadvance()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseMxAB_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseRall_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::C_HostedSynapse::reset_vars()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::C_HostedSynapse::var_value(unsigned long)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseABMinus_dd::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseMxAB_dd::update_queue()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronMap::preadvance()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronEC_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronHH_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronHH_r::preadvance()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronECA_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CNeuronHH2_d::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::CSynapseMxMap::update_queue()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::C_HostedNeuron::reset_vars()@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::C_HostedNeuron::var_value(unsigned long)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::COscillatorVdPol::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
+(c++)"non-virtual thunk to cnrun::COscillatorColpitts::derivative(std::vector<double, std::allocator<double> >&, std::vector<double, std::allocator<double> >&)@Base" 2.0.0
diff --git a/debian/lua-cnrun.info b/debian/lua-cnrun.info
new file mode 100644
index 0000000..c137dc8
--- /dev/null
+++ b/debian/lua-cnrun.info
@@ -0,0 +1 @@
+doc/lua-api/cnrun-lua-api.info
diff --git a/debian/lua-cnrun.install b/debian/lua-cnrun.install
new file mode 100644
index 0000000..05e6ced
--- /dev/null
+++ b/debian/lua-cnrun.install
@@ -0,0 +1,2 @@
+usr/lib/lua/*/cnrun.so
+usr/share/doc/lua-cnrun/examples/example1.lua
diff --git a/debian/lua-cnrun.lintian-overrides b/debian/lua-cnrun.lintian-overrides
new file mode 100644
index 0000000..26d170c
--- /dev/null
+++ b/debian/lua-cnrun.lintian-overrides
@@ -0,0 +1,3 @@
+# lua-cnrun produces a .so library that we move (in the install hook)
+# directly to /usr/lib/lua/5.x/cnrun.so, which is the only place it belongs
+lua-cnrun binary: missing-dependency-on-libc
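
As an aside, once a lua-cnrun .deb has been built, it is easy to
confirm the override actually matches; the package filename below is
hypothetical:

    # overridden tags are listed with an "O:" prefix
    lintian --show-overrides lua-cnrun_2.0.0-1_amd64.deb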
diff --git a/debian/rules b/debian/rules
index e69cdcc..95f91ee 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,16 +1,37 @@
 #!/usr/bin/make -f
 # -*- makefile -*-
-# Sample debian/rules that uses debhelper.
-# This file was originally written by Joey Hess and Craig Small.
-# As a special exception, when this file is copied by dh-make into a
-# dh-make output file, you may use that output file without restriction.
-# This special exception was added by Craig Small in version 0.37 of dh-make.
-
-# Uncomment this to turn on verbose mode.
-#export DH_VERBOSE=1
 
 DPKG_EXPORT_BUILDFLAGS = 1
 include /usr/share/dpkg/buildflags.mk
+DEB_BUILD_OPTIONS = parallel=4
 
 %:
-	dh $@ --with autoreconf
+	dh $@ --with autoreconf --parallel
+#	dh $@ -plua-cnrun --buildsystem=lua --with lua
+
+# Perhaps I didn't try hard enough, but there was a conflict between
+# upstream's own, independent way of installing the Lua module as
+# /usr/lib/lua/5.x/cnrun.so, and Debian's recommended way using
+# dh-lua.conf.  I stuck with the former, simply because upstream has
+# every right to handle the Lua module itself.  Debian maintainers can
+# just pick up the ready-made lua/cnrun.so, already installed where it
+# belongs.
+#
+# Not to mention that the dh-lua route would also require writing a
+# %.lo: target in the top-level Makefile.am.
+
+override_dh_clean:
+	rm -f config.log
+	dh_clean
+
+override_dh_shlibdeps:
+	dh_shlibdeps -L libcnrun2-dev
+
+# Here, we want dependency scanning to also cover a package that is
+# not yet installed; mentioning libcnrun2-dev explicitly appears to
+# fix that.
+
+override_dh_makeshlibs:
+	dh_makeshlibs -plibcnrun2 -V -- -t -c1 -q -v2.0.0
+
+# This is nasty; it is not clear that supplying symbols in demangled
+# form is of any help, but -q and -c1 obviously are.
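
For the record, the dpkg-gensymbols run behind that dh_makeshlibs call
can be reproduced by hand, to preview how the freshly built library
compares against the shipped symbols file.  A rough sketch, with the
build-tree path assumed:

    # -v2.0.0 stamps new symbols with that version, -c1 fails only
    # when symbols have disappeared, -q keeps the noise down
    dpkg-gensymbols -plibcnrun2 -v2.0.0 -c1 -q \
        -Pdebian/libcnrun2 -Odebian/libcnrun2.symbols.new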
diff --git a/debian/unwanted-files b/debian/unwanted-files
index fe1cddd..8ddf06f 100644
--- a/debian/unwanted-files
+++ b/debian/unwanted-files
@@ -13,3 +13,9 @@ Makefile.in
 src/Makefile.in
 src/*/Makefile.in
 doc/Makefile.in
+doc/lua-api/mdate-sh
+doc/lua-api/texinfo.tex
+doc/lua-api/Makefile.in
+doc/lua-api/stamp-vti
+doc/lua-api/version.texi
+doc/lua-api/cnrun-lua-api.info
diff --git a/debian/upstream b/debian/upstream/metadata
similarity index 100%
rename from debian/upstream
rename to debian/upstream/metadata
diff --git a/debian/watch b/debian/watch
index 90dcd11..ed1e141 100644
--- a/debian/watch
+++ b/debian/watch
@@ -1,2 +1,2 @@
 version=3
-http://johnhommer.com/academic/code/cnrun/source/cnrun-([\d\.]+).tar.bz2
+http://johnhommer.com/academic/code/cnrun/source/cnrun-([\d\.]+).tar.xz
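
A dry uscan run is a quick way to verify the switch to .tar.xz against
the upstream site:

    # report what debian/watch would fetch, without downloading
    uscan --no-download --verbose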
diff --git a/.gitignore b/upstream/.gitignore
similarity index 91%
copy from .gitignore
copy to upstream/.gitignore
index c7569fb..c2333f0 100644
--- a/.gitignore
+++ b/upstream/.gitignore
@@ -1,5 +1,3 @@
-.DirIcon
-.backups
 Doxygen
 .libs
 .deps
@@ -29,5 +27,3 @@ Makefile.in
 *.gch
 cscope.*
 TAGS
-
-*~
diff --git a/upstream/COPYING b/upstream/COPYING
index 94a9ed0..0698566 100644
--- a/upstream/COPYING
+++ b/upstream/COPYING
@@ -1,674 +1,2 @@
+CNRun is licensed under GPL-2+.  The full text of the latest version
+of the GPL is most easily found in /usr/share/common-licenses/GPL.
diff --git a/upstream/ChangeLog b/upstream/ChangeLog
index 2ccfcc2..33c67cf 100644
--- a/upstream/ChangeLog
+++ b/upstream/ChangeLog
@@ -1,3 +1,9 @@
+2014-11-01  andrei zavada  <johnhommer at gmail.com>
+	* cnrun executable gone, replaced by a Lua module (cnrun.so).
+	* Sweeping refactoring effort, incomplete in places, towards
+	  higher coding standards and discipline.
+	* Drop varfold (too specific to the ratiocoding experiment setup).
+
 2013-09-22  andrei zavada  <johnhommer at gmail.com>
 	* donotwant boost.
 	* Proper use of installed libexec/*.so.
diff --git a/upstream/INSTALL b/upstream/INSTALL
index 9ccf15b..239e074 100644
--- a/upstream/INSTALL
+++ b/upstream/INSTALL
@@ -1,249 +1,18 @@
-Installation Instructions
-*************************
-
-Cnrun is fully autotools compliant, and normally installable by
+CNRun uses canonical autotools and is normally installable by
 ./configure && make install.
 
-Dependencies include: libxml2.
-
-The ./configure option --enable-tools will build these three
-executables in addition to cnrun: varfold, spike2sdf, and
-hh-latency-estimator.
-
-The standard GNU autotools install instructions follow.
-
-
+Dependencies include libxml2, gsl, and the Lua libraries.
 
+With the --enable-tools option, configure will build two standalone
+tools, spike2sdf and hh-latency-estimator, which some may find
+useful.
 
-`--srcdir=DIR'
-     Look for the package's source code in directory DIR.  Usually
-     `configure' can determine that directory automatically.
+To use the Lua package when CNrun is installed into a prefix other
+than /, make sure LUA_CPATH contains the location of cnrun.so
+(e.g. export LUA_CPATH="/path/to/cnrun/?.so;$LUA_CPATH").  You should
+then be able to write 'require("cnrun")'.
 
-`configure' also accepts some other, not widely useful, options.  Run
-`configure --help' for more details.
+For further instructions, see doc/cnrun/examples/example1.lua.
 
+For the standard GNU autotools install instructions, please consult
+the original INSTALL file (commonly /usr/share/autoconf/INSTALL).
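+
+As a quick sanity check that Lua can see the module, this snippet (a
+sketch; the cpath shown is an assumption -- substitute your actual
+install prefix and Lua version) should print "cnrun loaded":
+
+     -- prepend the assumed install location to Lua's C module search path
+     package.cpath = "/usr/local/lib/lua/5.2/?.so;" .. package.cpath
+     local ok, mod = pcall (require, "cnrun")
+     print (ok and "cnrun loaded" or mod)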
diff --git a/upstream/Makefile.am b/upstream/Makefile.am
index 163a08f..98fd85d 100644
--- a/upstream/Makefile.am
+++ b/upstream/Makefile.am
@@ -1,16 +1,18 @@
-ACLOCAL_AMFLAGS := -I m4
+ACLOCAL_AMFLAGS = -I m4
+include src/Common.mk
+
 SUBDIRS := src doc
 
 EXTRA_DIST = \
 	ChangeLog \
 	autogen.sh \
-	acinclude.m4
+	libcnrun.pc.in
 
-man_MANS = \
-	man/cnrun.1
 if DO_TOOLS
-man_MANS += \
-	man/varfold.1 \
+man_MANS = \
 	man/spike2sdf.1 \
 	man/hh-latency-estimator.1
 endif
+
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = libcnrun.pc
diff --git a/upstream/README b/upstream/README
index 378fbe6..2064512 100644
--- a/upstream/README
+++ b/upstream/README
@@ -1 +1 @@
-(refer to doc/README)
+Refer to doc/README, or visit http://johnhommer.com/academic/code/cnrun.
diff --git a/upstream/configure.ac b/upstream/configure.ac
index ccdd2f3..60a80cc 100644
--- a/upstream/configure.ac
+++ b/upstream/configure.ac
@@ -1,7 +1,7 @@
 AC_COPYRIGHT([Copyright (c) 2008-14 Andrei Zavada <johnhommer at gmail.com>])
 
-AC_INIT([cnrun], [1.1.15_rc], [johnhommer at gmail.com])
-AC_CONFIG_SRCDIR([src/cnrun/runner-main.cc])
+AC_INIT([cnrun], [2.0.0], [johnhommer at gmail.com])
+AC_CONFIG_SRCDIR([src/libcnrun/model.hh])
 AC_CONFIG_MACRO_DIR([m4])
 AC_PREREQ(2.61)
 
@@ -47,52 +47,45 @@ ac_cv_cxx_cpp11_features,
 ])
 AC_CXX_STDCPP11_FEATURES()
 test $ac_cv_cxx_cpp11_features = no && \
-   AC_MSG_ERROR([g++ >= 4.7 is required to build $PACKAGE as we must use -std=c++11 features your compiler doesn't seem to support], 1)
+   AC_MSG_ERROR([
+Your C++ compiler does not seem to support some C++11 features\
+that we require.  Please check config.log for details.
+], 1)
 cxx_version=`$CXX --version | head -n1`
 
 AC_OPENMP()
 
-AX_LIB_READLINE
-if test x"$ax_cv_lib_readline" = x"no"; then
-   echo "Required library readline not found"
-   AC_MSG_ERROR( [Missing readline], 2)
-fi
-
 PKG_CHECK_MODULES([LIBCN], [gsl libxml-2.0])
 
+AX_PROG_LUA([5.1], [5.3],)
+AX_LUA_LIBS
+AX_LUA_HEADERS
+dnl we cannot do strcmp in cpp, so here's bash to the rescue
+if test x"$LUA_VERSION" = x"5.1"; then
+   AC_DEFINE([HAVE_LUA_51], [], ["Do we have lua 5.1?"])
+else
+   AC_DEFINE([HAVE_LUA_52], [], ["Do we have lua 5.2?"])
+fi
 
 AC_ARG_ENABLE(
 	[tools],
-	AS_HELP_STRING( [--enable-tools], [build spike2sdf, varfold & hh-latency-estimator (default = yes)]),
-	[do_tools=$enableval], [do_tools=no])
+	AS_HELP_STRING( [--enable-tools], [build spike2sdf & hh-latency-estimator (default = yes)]),
+	[do_tools=$enableval], [do_tools=yes])
 AM_CONDITIONAL(DO_TOOLS, [test x"$do_tools" = xyes])
 if test x"$do_tools" != xyes; then
    do_tools=no
 fi
 
-AC_ARG_ENABLE(
-	[pch],
-	[AS_HELP_STRING( [--enable-pch], [precompile headers (default = no)])],
-	[do_pch=$enable_pch],
-	[do_pch=no])
-dnl defaulting to no to enable make dist-check
-AM_CONDITIONAL(DO_PCH, test x$do_pch = xyes)
-
-
-AC_SUBST(user, [`whoami`@`hostname`])
-AC_SUBST(docdir, [${prefix}/share/doc/${PACKAGE_TARNAME}])
-
-
 AC_OUTPUT([
 	Makefile
+        libcnrun.pc
 	src/Makefile
 	src/libstilton/Makefile
-	src/libcn/Makefile
-	src/cnrun/Makefile
+	src/libcnrun/Makefile
+	src/lua-cnrun/Makefile
 	doc/Makefile
-	man/cnrun.1
+	doc/lua-api/Makefile
 	man/spike2sdf.1
-	man/varfold.1
 	man/hh-latency-estimator.1
 	src/tools/Makefile])
 
@@ -107,5 +100,4 @@ AC_MSG_RESULT([
 
    build tools:		${do_tools}
 
-   precompile headers:  $do_pch
 ])
diff --git a/upstream/doc/Makefile.am b/upstream/doc/Makefile.am
index 312471a..d4ef37c 100644
--- a/upstream/doc/Makefile.am
+++ b/upstream/doc/Makefile.am
@@ -1,22 +1,18 @@
-ACLOCAL_AMFLAGS = -I m4
+SUBDIRS = lua-api
 
 doc_DATA = \
 	README
 
+# override docdir, else we would stomp on the cnrun docdir from a 1.x
+# installation, which can and must coexist with ours
+docdir=${datarootdir}/doc/lua-cnrun
+
 examples_DATA = \
-	examples/ratiocoding/ORNa.x1000.in \
-	examples/ratiocoding/ORNb.x1000.in \
-	examples/ratiocoding/PN.0.sxf.target \
-	examples/ratiocoding/batch \
-	examples/ratiocoding/m.nml \
-	examples/ratiocoding/script
+	examples/example1.lua \
+	examples/m.nml
 
 examplesdir = $(docdir)/examples
 
-install-data-hook:
-	$(mkinstalldirs) $(DESTDIR)/$(examplesdir)
-
-
 EXTRA_DIST = \
 	$(examples_DATA) \
 	$(doc_DATA)
diff --git a/upstream/doc/README b/upstream/doc/README
index e1594fb..878da73 100644
--- a/upstream/doc/README
+++ b/upstream/doc/README
@@ -1,74 +1,22 @@
-CNrun
------
+CNrun is a neuronal network simulator, with the following features:
 
-1. Overview
-2. Usage
-3. Repeatability, rng-dependent behaviour
+* conductance- and rate-based Hodgkin-Huxley neurons, Rall and
+  Alpha-Beta synapses;
 
+* a 6-5 Runge-Kutta integration method: slow but precise, adjustable;
 
-1. Overview
+* Poisson, Van der Pol and Colpitts oscillators, and an interface for
+  external stimulation sources;
 
-This is a library (libcn) and a CLI (cnrun) tool to simulate neuronal
-networks, similar to NEURON and GENESIS except that neurons are
-non-compartmentalised, and there is no scripting language.  It is
-written by Andrei Zavada <johnhommer at gmail.com> building on the
-original work by Thomas Nowotny <tnowotny at sussex.ac.uk>.
+* NeuroML network topology import/export;
 
-CNrun reads network topology description from a NeuroML file (as, for
-example, generated by neuroConstruct), where the `cell_type' attribute
-determines the unit class.
+* logging of state variables and spikes, for visualization with
+  e.g. gnuplot;
 
-The following neuron and synapse classes are provided by libcn:
+* implemented as a Lua module, for scripting model behaviour (e.g.,
+  to enable plastic processes regulated by model state);
 
-  - HH         : Hodgkin-Huxley by Traub and Miles (1991)
-  - HHRate     : Rate-based model of the Hodgkin-Huxley neuron
-  - HH2        : Hodgkin-Huxley by Traub & Miles w/ K leakage
-  - DotPoisson : Duration-less spike Poisson oscillator
-  - Poisson    : Poisson oscillator
-  - DotPulse   : Dot Pulse generator
-  - NMap       : Map neuron
-  - LV         : Lotka-Volterra oscillator
-  - Colpitts   : Colpitts oscillator
-  - VdPol      : Van der Pol oscillator
-
-  - AB         : Alpha-Beta synapse (Destexhe, Mainen, Sejnowsky, 1994)
-  - ABMinus    : Alpha-Beta synapse w/out (1-S) term
-  - Rall       : Rall synapse (Rall, 1967)
-  - Map        : Map synapse
-
-Scripting support in CNrun includes commands for creating and
-populating a model, setting parameters for single units or groups
-selected based on regex matching.  Variables (‘a = 1; b = a + 2’) and
-arithmetic expressions (‘-’, ‘+’, ‘*’, ‘/’, ‘<’, ‘<=’, ‘>’, ‘>=’,
-‘==’, ‘()’) are supported as in C.
-
-
-2. Installation and prerequisites
-
-As a reasonably complex C++ piece of code, CNRun has many a loop with
-iterators.  Since gcc 4.4.4, the keyword auto has come as a great
-relief in this regard; versions of gcc prior to 4.4.4, therefore, will
-not compile CNRun.
-
-Cnrun depends on libreadline, libgsl, libxml2, whichever
-version is current at the time of release.
-
-
-3. Repeatability, rng-based behaviour
-
-Using rng facilities of the GNU Scientific Library, cnrun has the
-ability to specify the gsl rng type and set the seed via the
-environment variables GSL_RNG_TYPE and GSL_RNG_SEED, in which case
-reproducibility of CNrun results is guaranteed (as per gsl's statement
-that the generated series will).
-
-If you don't bother setting those env vars, seeding will be done with
-the current time (specifically, field .tv_usec of a struct timeval
-after a call to gettimeofday()).
-
-
-
-If you are interested in using libcn for your own projects, look at
-doc/example.cc, and perhaps at src/hh-latency-estimator.cc
-(all the code is there, and it's yours :).
+* interaction (topology push/pull, async connections) with other
+  CNrun models running elsewhere on a network (planned).
 
+There is an example1.lua in the examples dir for a primer; a minimal
+session sketch follows.
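+
+A minimal session (a sketch assuming an m.nml topology file in the
+current directory; see example1.lua for proper error handling):
+
+  local M = require("cnrun")
+  local _, C = M.get_context ()
+  M.new_model (C, "demo")
+  M.import_nml (C, "demo", "m.nml")
+  M.advance (C, "demo", 1000)  -- one second of model time
+  M.drop_context (C)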
diff --git a/upstream/doc/examples/example1.lua b/upstream/doc/examples/example1.lua
new file mode 100644
index 0000000..2feca9f
--- /dev/null
+++ b/upstream/doc/examples/example1.lua
@@ -0,0 +1,204 @@
+-- This is an example illustrating how to use the cnrun package in Lua.
+--
+-- 1. After loading the cnrun module with 'require', the first step is
+--    to get or create an interpreter context.  It is an opaque light
+--    user data object, which you will pass as the first argument to
+--    all subsequent calls to cnrun functions.
+--
+-- 2. You can create and keep multiple models in a context, modify and
+--    advance them independently. Models are identified by a label (a
+--    string).
+--
+-- 3. On error, all cnrun methods return two values: first a nil,
+--    second a string describing what went wrong.  On success, the
+--    first value is 1 (an integer), and the rest are method-specific.
+--
+-- 4. Don't lose the context object.  It will not be garbage-collected
+--    for you (it is a C++ thing).
+
+-- To execute this script with lua-5.1, do s/table.unpack/unpack/g.
+
+local M = require("cnrun")
+
+local res, ult, result
+local C, model
+
+M.dump_available_units ()
+
+res, ult = M.get_context ()
+if res == nil then
+   print (ult)
+   return
+end
+C = ult
+
+local mname = "FAFA"
+res, ult = M.new_model (C, mname)
+if res == nil then
+   print (ult)
+   return
+end
+model = ult
+print ("Created model")
+
+print ("Setting verbosely to 4")
+M.set_model_parameter (C, mname, "verbosely", 4)
+
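+-- list_models returns (1, name, ...): capture all return values in a
+-- table, then split the status flag from the payload: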
+result = {M.list_models (C)}
+res, ult = result[1], {table.unpack(result, 2)}
+if res == nil then
+   print (ult)
+   return
+end
+print ()
+
+print ("Model(s):")
+local model_list = ult
+print (table.concat(model_list))
+print ()
+
+
+res, ult = M.import_nml (C, mname, "m.nml")
+if res == nil then
+   print (ult)
+   -- return
+end
+print ()
+
+
+print ("Host parmeters:")
+local parameters = {
+   "verbosely", "integration_dt_min",
+   "integration_dt_max", "integration_dt_cap",
+   "listen_dt", "listen_mode",
+   "sxf_start_delay", "sxf_period", "sdf_sigma"
+}
+local fmt = " %22s: %-q"
+for i,p in ipairs(parameters) do
+   res, ult = M.get_model_parameter (C, mname, p)
+   print (string.format (fmt, p, ult))
+end
+print ()
+
+res, ult = M.delete_model (C, "fafa moo")
+if res == nil then
+   print (ult .. " (ignored)")
+   -- return
+end
+
+
+result = {M.get_units_matching(C, mname, "L.*")}
+res, ult = result[1], {table.unpack(result, 2)}
+if res == nil then
+   print (ult)
+   return
+end
+print ()
+print ("There are " .. #ult .. " unit(s) matching L.*:")
+local unit_list = ult
+local fmt = " %-10s %-16s %-16s %-12s %-16s %-6s"
+print (string.format(
+          fmt,
+          "label", "class", "family", "species", "has_sources", "is_altered"))
+print (string.rep('-', 87))
+for _, u in ipairs(unit_list) do
+   result = {M.get_unit_properties (C, mname, u)}
+   res, ult = result[1], {table.unpack(result, 2)}
+   local b = function (x) if x then return "yes" else return "no" end end
+   print (string.format(
+             fmt,
+             ult[1], ult[2], ult[3], ult[4], b(ult[5]), b(ult[6])))
+end
+print()
+
+
+print ("Advancing 10 sec:")
+res, ult = M.advance (C, mname, 10000)
+
+
+print ("Modify parameter:")
+local u, p, v0, v9, vr = "LNz.0", "gNa"
+_, ult = M.get_unit_parameter (C, mname, u, p)
+v0 = ult
+_, ult = M.set_unit_parameter (C, mname, u, p, v0 * 2)
+_, ult = M.get_unit_parameter (C, mname, u, p)
+v9 = ult
+-- with a revert
+res, ult = M.revert_matching_unit_parameters (C, mname, u)
+if res == nil then
+   print (ult)
+   return
+end
+local count_reset = ult
+_, ult = M.get_unit_parameter (C, mname, u, p)
+vr = ult
+print (string.format(
+          ".. changed %s of %s from %g to %g, then reset (%d affected) to %g\n",
+          p, u, v0, v9, count_reset, vr))
+
+
+print ("Modify parameter in bulk:")
+local us, ut = "LNz.0", "LN1.0"
+res, ult = M.set_matching_synapse_parameter (C, mname, us, ut, "gsyn", 4.2)
+if res == nil then
+   print (ult)
+   return
+end
+print (string.format(
+          ".. changed gsyn of synapse connecting %s to %s, to %g\n",
+          us, ut, 4.2))
+
+res, ult = M.describe_model (C, mname)
+
+
+print ("State variables:")
+for i = 1, 6, 1 do
+   M.advance (C, mname, 1000)
+   result = {M.get_unit_vars (C, mname, "LNz.0")}
+   res, ult = result[1], {table.unpack(result, 2)}
+   print (table.concat(ult, '; '))
+end
+print()
+
+
+local affected, remaining
+print ("Putout:")
+-- there is unit_list already:
+math.randomseed(os.time())
+local deleting = unit_list[math.random(1, #unit_list)]
+-- deleting, _ = string.gsub(deleting, ".", "\\.")
+res, ult = M.putout (C, mname, deleting)
+if res == nil then
+   print (ult)
+   return
+end
+print (string.format(".. deleted unit %s", deleting))
+print()
+
+print ("Decimate:")
+res, ult = M.decimate (C, mname, "L.*", 0.3)
+if res == nil then
+   print (ult)
+   return
+end
+affected = ult
+remaining = #{M.get_units_matching (C, mname, ".*")} - 1
+print (string.format(
+          ".. %d units gone, %d remaining",
+          affected, remaining))
+print()
+
+
+res, ult = M.delete_model (C, mname)
+if res == nil then
+   print ("Error: Failed to delete model: ", ult)
+   return
+end
+print ("Model ".. ult .. " deleted")
+
+res, ult = M.drop_context (C)
+if res == nil then
+   print ("Error: Failed to drop context: ", ult)
+   return
+end
+print ("Context dropped: " .. ult)
diff --git a/upstream/doc/examples/ratiocoding/m.nml b/upstream/doc/examples/m.nml
similarity index 100%
rename from upstream/doc/examples/ratiocoding/m.nml
rename to upstream/doc/examples/m.nml
diff --git a/upstream/doc/examples/ratiocoding/ORNa.x1000.in b/upstream/doc/examples/ratiocoding/ORNa.x1000.in
deleted file mode 100644
index b70eeaa..0000000
--- a/upstream/doc/examples/ratiocoding/ORNa.x1000.in
+++ /dev/null
@@ -1,112 +0,0 @@
-125
-0 
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-0 0  10 10
-
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-0 0  13 13
-
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-0 0  16.9 16.9
-
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-0 0  21.97 21.97
-
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-0 0  28.561 28.561
-
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-0 0  37.1293 37.1293
-
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-0 0  48.2681 48.2681
-
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-0 0  62.7485 62.7485
-
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-0 0  81.5731 81.5731
-
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-0 0  106.045 106.045
-
diff --git a/upstream/doc/examples/ratiocoding/ORNb.x1000.in b/upstream/doc/examples/ratiocoding/ORNb.x1000.in
deleted file mode 100644
index f1282e2..0000000
--- a/upstream/doc/examples/ratiocoding/ORNb.x1000.in
+++ /dev/null
@@ -1,112 +0,0 @@
-125
-0 
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
-0 0  10 10
-0 0  13 13
-0 0  16.9 16.9
-0 0  21.97 21.97
-0 0  28.561 28.561
-0 0  37.1293 37.1293
-0 0  48.2681 48.2681
-0 0  62.7485 62.7485
-0 0  81.5731 81.5731
-0 0  106.045 106.045
-
diff --git a/upstream/doc/examples/ratiocoding/PN.0.sxf.target b/upstream/doc/examples/ratiocoding/PN.0.sxf.target
deleted file mode 100644
index 3d86f48..0000000
--- a/upstream/doc/examples/ratiocoding/PN.0.sxf.target
+++ /dev/null
@@ -1,10 +0,0 @@
- 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00
- 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00
- -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00
- -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00 -7.20000000e+00 -7.20000000e+00
- -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00 -7.20000000e+00
- -7.20000000e+00 -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00 -7.19999730e+00
- -7.20000000e+00 -7.20000000e+00 -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00 -7.19703816e+00
- -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00 -6.76042467e+00
- -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01 1.62910659e+00
- -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.20000000e+00 -7.19999730e+00 -7.19703816e+00 -6.76042467e+00 1.62910659e+00 1.68000000e+01
diff --git a/upstream/doc/examples/ratiocoding/batch b/upstream/doc/examples/ratiocoding/batch
deleted file mode 100755
index 29b669d..0000000
--- a/upstream/doc/examples/ratiocoding/batch
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-totalruns=$1
-m=$2
-
-function run_in_dir()
-{
-    mkdir -p $1 && cd "$1"
-    echo
-    echo "----------------- running in $1"
-    rm -f CFs *sxf*
-    local i
-    for ((i = 0; i < totalruns; ++i)); do
-        echo "run $((i+1)) of $totalruns"
-        cnrun -Dfi=$2 -Dft=$3 -Dfo=$4 -e ../script -v1 -tT.05 && \
-            varfold  -x10 -x10 -G.. -Vweight -om PN.0 && \
-            mv PN.0.sxf.mx $i.PN.0.sxf.mx && \
-            mv PN.0.sxf $i.PN.0.sxf && \
-            cat <PN.0.CF >>CFs
-    done
-
-    varfold -x10 -x10 -om -zavg -t- -UAVERAGE *.PN.0.sxf.mx
-    cp AVERAGE.mx ../AVERAGE.$1.mx
-
-    cd ..
-}
-
-run_in_dir "___" 1   1   1
-run_in_dir "__o" 1   1  $m
-run_in_dir "_o_" 1  $m   1
-run_in_dir "_oo" 1  $m  $m
-run_in_dir "o__" $m  1   1
-run_in_dir "o_o" $m  1  $m
-run_in_dir "oo_" $m  $m  1
diff --git a/upstream/doc/examples/ratiocoding/rational-plot-sdf-interactive b/upstream/doc/examples/ratiocoding/rational-plot-sdf-interactive
deleted file mode 100755
index b1652ed..0000000
--- a/upstream/doc/examples/ratiocoding/rational-plot-sdf-interactive
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-D=`dirname $1`
-T=`basename $1`
-
-cd "$D"
-CF=$(<CF)
-
-DESC=${D##*/}
-CASE=${DESC%%_*}
-PARAMS="Params: "${DESC#$CASE"_"}
-
-gnuplot -persist <<EOF
-
-set title "$CASE"
-set key off
-
-set samples 40
-set isosample 20
-
-set cbrange [0:10]
-
-set hidden3d
-set pm3d
-
-set label "$PARAMS" at character 1,3
-set label "CF = $CF" at character 1,1
-
-splot "$T" matrix with dots
-
-EOF
diff --git a/upstream/doc/examples/ratiocoding/rational-plot-sdf-static b/upstream/doc/examples/ratiocoding/rational-plot-sdf-static
deleted file mode 100755
index 4574a85..0000000
--- a/upstream/doc/examples/ratiocoding/rational-plot-sdf-static
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-D=`dirname $1`
-T=`basename $1`
-
-cd "$D"
-#CF=$(<CF)
-
-DESC=${D##*/}
-CASE=$T
-#CASE=${DESC%%_*}
-PARAMS="Params: "${DESC#$CASE"_"}
-
-gnuplot -persist <<EOF
-
-set title "$CASE"
-set key off
-
-set cbrange [0:12]
-
-#set label "$PARAMS" at character 1,3
-#set label "CF = $CF" at character 1,1
-
-unset xtics
-unset ytics
-
-plot "$T" matrix with image
-
-EOF
diff --git a/upstream/doc/examples/ratiocoding/rational-plot-var b/upstream/doc/examples/ratiocoding/rational-plot-var
deleted file mode 100755
index 825bb5e..0000000
--- a/upstream/doc/examples/ratiocoding/rational-plot-var
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-CSIZE=500
-
-D=`dirname $1`
-T=`basename $1`
-
-GNUPLOTARGS=
-for F in $*; do
-    FTITLE=`basename $F`
-    FTITLE=${FTITLE%.var}
-    FTITLE=${FTITLE%.varx}
-    if file "$1" | grep data &>/dev/null; then
-	FSPEC="binary format=\"%lf%lf\""
-    else
-	FSPEC="using 1:2"
-    fi
-    GNUPLOTARGS+="\"$F\" $FSPEC title \"$FTITLE\" with lines,"
-done
-GNUPLOTARGS=${GNUPLOTARGS%,}
-echo $GNUPLOTARGS
-
-DESC=${D##*/}
-
-gnuplot -persist <<EOF
-
-set title "$DESC"
-
-set xrange [0:5000]
-set xtics 0,$((CSIZE*10))
-set mxtics 10
-plot $GNUPLOTARGS
-
-EOF
diff --git a/upstream/doc/examples/ratiocoding/script b/upstream/doc/examples/ratiocoding/script
deleted file mode 100644
index a7ec47a..0000000
--- a/upstream/doc/examples/ratiocoding/script
+++ /dev/null
@@ -1,58 +0,0 @@
-load_nml ../m.nml
-
-new_source Periodic aabb ../ORNa.x1000.in
-new_source Periodic abab ../ORNb.x1000.in
-connect_source aabb ORNa\.0 lambda
-connect_source abab ORNb\.0 lambda
-
-# .0103 and .0074 come from annealing
-set_parm_synapse	ORN.* LN[12]\..*	gsyn	0.110/1000
-set_parm_synapse	ORN.* LNz\..*		gsyn	0.073/1000
-
-set_parm_synapse	ORN.* LN.*	alpha	.4 # .27785
-set_parm_synapse	ORN.* LN.*	beta	.3
-set_parm_synapse	ORN.* LN.*	trel	3
-
-set_parm_synapse	LN.*	LN.*		Esyn	-80
-
-gi = 0.499/1
-gt = 0.389/1
-go = 0.386/1
-#gi = 0.18
-#gt = 0.18
-#go = 0.18
-
-# inbound
-set_parm_synapse	LN[12]\..* LNz\..*	gsyn	gi * fi
-# transverse
-set_parm_synapse	LN[12]\..* LN[21]\..*	gsyn	gt * ft
-# outbound
-set_parm_synapse	LNz\..* LN[12]\..*	gsyn	go * fo
-
-set_parm_synapse	LN.* LN.*		alpha	.1
-set_parm_synapse	LN.* LN.*		beta	.05
-set_parm_synapse	LN.* LN.*		trel	5
-
-set_parm_synapse	LNz\..*	PNi\..*		Esyn	-80
-set_parm_synapse	LNz\..* PNi\..*		gsyn	0.0060
-set_parm_synapse	LNz\..* PNi\..*		alpha	.2
-set_parm_synapse	LNz\..* PNi\..*		beta	.05
-set_parm_synapse	LNz\..* PNi\..*		trel	25
-
-set_parm_synapse	PNi\..* PN\..*		Esyn	-80
-set_parm_synapse	PNi\..* PN\..*		gsyn	0.02
-set_parm_synapse	PNi\..* PN\..*		alpha	.2
-set_parm_synapse	PNi\..* PN\..*		beta	.05
-
-# set up oscillations
-set_parm_neuron		PNi?\..*		Idc	.1
-
-
-sxf_params 625:500:400
-start_log_spikes PN\.0
-
-listen_mode b+
-start_listen	LNz\..
-start_listen	PN\..
-
-advance	50000+250
diff --git a/upstream/doc/lua-api/.gitignore b/upstream/doc/lua-api/.gitignore
new file mode 100644
index 0000000..fa63357
--- /dev/null
+++ b/upstream/doc/lua-api/.gitignore
@@ -0,0 +1,6 @@
+cnrun-lua-api.info
+cnrun-lua-api.html/
+mdate-sh
+stamp-vti
+texinfo.tex
+version.texi
diff --git a/upstream/doc/lua-api/Makefile.am b/upstream/doc/lua-api/Makefile.am
new file mode 100644
index 0000000..0fbb8a5
--- /dev/null
+++ b/upstream/doc/lua-api/Makefile.am
@@ -0,0 +1,4 @@
+info_TEXINFOS = \
+	cnrun-lua-api.texi
+
+all-local: html info
diff --git a/upstream/doc/lua-api/cnrun-lua-api.texi b/upstream/doc/lua-api/cnrun-lua-api.texi
new file mode 100644
index 0000000..6e76736
--- /dev/null
+++ b/upstream/doc/lua-api/cnrun-lua-api.texi
@@ -0,0 +1,533 @@
+\input texinfo @c -*-texinfo-*-
+@c %**start of header
+@setfilename cnrun-lua-api.info
+@settitle CNrun Lua API
+@c %**end of header
+
+@include version.texi
+
+@dircategory Libraries
+@direntry
+* CNrun Lua API: (cnrun-lua).        CNrun API in Lua.
+@end direntry
+
+@copying
+
+Copyright @copyright{} 2014 Andrei Zavada @email{johnhommer@@gmail.com}.
+
+The files representing this documentation set are part of the CNrun
+project, and covered by GPL-2+.
+@end copying
+
+@titlepage
+@title CNrun Lua API
+@subtitle version @value{VERSION}
+@subtitle @value{UPDATED}
+@author Andrei Zavada
+
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@contents
+
+@ifnottex
+@node Top
+@top CNrun Lua API
+@comment  node-name,  next,  previous,  up
+@insertcopying
+
+This file documents the CNrun functions exposed in Lua.
+@end ifnottex
+
+@c The master menu, created with texinfo-master-menu, goes here.
+
+@menu
+* Introduction::  CNrun is a neuronal network model simulator, with
+  scripting done in Lua.
+* General notes::  Loading cnrun module in Lua; how errors are reported.
+* Interpreter context::  CNrun interpreter context needs to be created
+  first.
+* Models::  Operations on neuronal network models: create, populate,
+  simulate, etc.
+* Individual units::  View/modify individual units' parameters and
+  state variables.
+* External excitation sources::  External excitation sources.
+* Sampling state variables::  Ways to assess model state and behaviour.
+* Unit species::  A table of all available built-in units.
+* Planned features::  There are some, although only time permitting.
+* Index::  All functions listed alphabetically.
+@end menu
+
+@node Introduction
+@chapter Introduction
+
+CNrun is a slow but precise neuronal network model simulator written in
+C++, for which functions are exposed in the Lua scripting language.
+These functions are described in this document.
+
+In the present version (2.x), CNrun core is made into a shared library,
+in contrast to CNrun 1.x which had it as a single executable
+interpreting its own, very simple scripts.  To enable more capable
+scripting, with interesting possibilities such as network plastic
+processes regulated by model activity (excitation levels, spike
+patterns, etc.), wrappers are provided to call core functions from
+Lua.
+
+In the simplest case where you have a NeuroML-defined topology, a
+simulation session could be as brief as this:
+
+@example
+local M = require("cnrun")
+_, C = M.get_context()
+M.new_model (C, "fafa")
+M.import_nml (C, "fafa", "model.nml")
+M.advance (C, "fafa", 1000)
+@end example
+
+@noindent
+This snippet will create an interpreter context, create a model in it,
+load an NML file, and advance the model one second.
+
+To report a bug or request a wishlist item, go to @url{http://github.com/hmmr/cnrun}.
+
+@node General notes
+@chapter General notes
+
+@section Preparations
+ All functions are made available in the @code{cnrun} module namespace,
+ by means of standard @code{require}.  Thus, @code{local M =
+ require("cnrun"); M.some_function(args)}.
+
+@section Returned arguments
+ On error, all functions return two arguments: first a @code{nil},
+ and second, an error message describing what went wrong (a string).
+
+ On success, the first returned argument will be 1 (integer), followed
+ by one or more values specifically described in the following sections.
+ Unless stated otherwise, functions which have nothing meaningful to
+ return, on success return @code{1, model_name}.
+
+@node Interpreter context
+@chapter Interpreter context
+
+In Lua, after loading the cnrun module with @code{require("cnrun")},
+the first step is to get an interpreter context.  It is an opaque light
+user data object, which you should pass as the first argument to all
+subsequent calls to CNrun functions.
+
+You can create and keep multiple models in a context, modify and advance
+them independently.
+
+The function to create a CNrun context is @code{get_context()}:
+
+@defun get_context ()
+  Create a CNrun interpreter context, in which all subsequent operations
+  will be performed.
+
+  On success, returns the newly created context object @var{C} as the
+  second argument.
+@end defun
+
+@defun drop_context (C)
+  Drop the interpreter context @var{C}, previously obtained with
+  @emph{get_context()}.
+@end defun
+
+In the following sections, context is passed as the first (or only)
+argument to all functions.  It is denoted as @var{C} and not described
+each time.
+
+@node Models
+@chapter Models
+
+Multiple models can be created, accessed, modified, advanced within a
+single interpreter context.  Models are identified by a label (a string).
+
+@section Creating and deleting models
+
+@defun new_model (C, M)
+  Create a model named @var{M} (model label).
+@end defun
+
+@defun delete_model (C, M)
+  Delete model @var{M}.
+@end defun
+
+@defun list_models (C)
+  List models existing in context @var{C}, returned as strings.
+@end defun
+
+@section Populating models
+
+Models can be populated by constituent neurons and synapses in two ways:
+@enumerate
+@item importing topology from a file (@code{import_nml()});
+@item adding individual units one by one (@code{new_neuron()}, @code{new_synapse()}).
+@end enumerate
+
+@defun import_nml (C, M, file_name)
+  Import network topology from a file (@var{file_name}) into a model
+  named @var{M}.
+@end defun
+
+@defun export_nml (C, M, file_name)
+  Export network topology of model @var{M} into file @var{file_name}.
+@end defun
+
+@defun new_neuron (C, M, type, label)
+  Create a neuron of type @var{type}, with this @var{label}, in model
+  @var{M}.
+@end defun
+
+@defun new_synapse (C, M, type, source, target)
+  Create a synapse of this @var{type} connecting neurons labelled
+  @var{source} and @var{target}.
+@end defun
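+
+By hand, a toy two-neuron network might thus be built like this (a
+sketch; the labels are made up, and the unit types come from the
+tables in @ref{Unit species}):
+
+@example
+M.new_neuron (C, "fafa", "HH", "n1")
+M.new_neuron (C, "fafa", "HH", "n2")
+M.new_synapse (C, "fafa", "AB", "n1", "n2")
+@end example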
+
+@section Other operations on models as a whole
+
+@defun reset_model (C, M)
+  Reset the state of all units, rewind all periodic sources and flush
+  and close any logs in model @var{M}.
+@end defun
+
+@defun cull_deaf_synapses (C, M)
+  Remove all synapses with a zero @var{gsyn}, in model @var{M}.  This
+  makes sense unless you are going to modify @var{gsyn} at a later time.
+@end defun
+
+@defun describe_model (C, M)
+  Describe model @var{M}.  The output will be printed to stdout and look
+  like this:
+@verbatim
+Model "FAFA":
+     13 units total (7 Neurons, 6 Synapses):
+       11 hosted,
+        2 standalone
+        0 discrete dt-bound
+      0 Listening units
+      0 Spikelogging neurons
+      0 Units being tuned continuously
+      0 Units being tuned periodically
+      2 Spontaneously firing neurons
+      2 Multiplexing synapses
+     26 vars on integration vector
+@end verbatim
+@end defun
+
+@defun advance (C, M, duration)
+  Run simulation in model @var{M} for @var{duration} milliseconds.
+@end defun
+
+@defun advance_until (C, M, time)
+  Run simulation in model @var{M} until point in time @var{time}.
+
+  Note that the real eventual model time after this function has
+  returned may be a little (less than the last @var{dt}) greater than
+  expected.
+@end defun
+
+
+@section Model parameters
+
+Each model has the following parameters that affect its behaviour:
+
+@table @emph
+@item verbosely
+Level of verbosity of printed messages (integer, 0 up to 6).
+
+@item integration_dt_min
+Lower bound for @var{dt} (float).
+
+@item integration_dt_max
+Upper bound for @var{dt} (float).
+
+@item integration_dt_cap
+Maximal factor by which @var{dt} can be allowed to increase in
+consecutive iterations (float).
+
+@item listen_dt
+A time increment between consecutive sampling and logging of state
+variables (float).
+
+@item listen_mode
+A string of symbols defining unit `listening' mode, of the form
+@emph{x}@{@emph{-}@}, where @emph{x} indicates the mode and @emph{-},
+whether to disable that mode (if given, else enable).  There are three
+modes: @var{1}, whether to log the first state variable only, or all
+unit vars; @var{d}, whether to defer writing until end of simulation;
+and @var{b}, whether to write FP values in native machine representation
+instead of @code{"%g"}.
+
+@item sxf_start_delay
+Length of time, before and after the sampling point, limiting the
+window within which spikes are counted for sdf/sxf evaluation (float).
+Leave at 0 to count all spikes from 0 until the current model time; a
+couple of seconds should give reasonable accuracy.
+
+@item sxf_period
+Sampling period for sdf and shf (spike density and spike heterogeneity)
+functions.
+
+@item sdf_sigma
+Parameter @var{sigma} in sdf (float).
+@end table
+
+@defun get_model_parameter (C, M, P)
+  Get a model parameter @var{P}, one of those listed above.
+@end defun
+
+@defun set_model_parameter (C, M, P, V)
+  Set a model parameter @var{P} to value @var{V}.
+@end defun
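+
+For example (a sketch; "fafa" is the model from the introduction):
+
+@example
+M.set_model_parameter (C, "fafa", "integration_dt_max", 0.1)
+local _, dt_max = M.get_model_parameter (C, "fafa", "integration_dt_max")
+@end example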
+
+
+@node Individual units
+@chapter Individual unit identification, properties and parameters
+
+Units populating a model are uniquely identified by their label, set at
+creation time.  Where a single unit needs to be selected for a function,
+the corresponding argument to that function is designated @var{L}.  Or,
+if an operation is supposed to affect many units, selected by a regex
+pattern on their labels, that argument is designated @var{R}.
+
+Apart from the (arbitrary) label, units are classified as belonging to
+either Neuron or Synapse class, further belonging to a certain family
+and species.  These categories are built-in, each species defining a
+set of parameters and state variables.
+
+All available unit species are listed in @ref{Unit species}.
+
+@defun get_unit_properties (C, M, L)
+  Return the following attributes and properties of unit @var{L}, in
+  order: @emph{label}, @emph{class_name}, @emph{family}, @emph{species},
+  as strings, followed by flags @emph{has_sources} and
+  @emph{is_not_altered}.
+@end defun
+
+@defun get_unit_parameter (C, M, L, P)
+  Get the value of unit @var{L}'s parameter @var{P}.
+@end defun
+
+@defun set_unit_parameter (C, M, L, P, V)
+  Set unit @var{L}'s parameter @var{P} to a value of @var{V}.
+@end defun
+
+@defun get_unit_vars (C, M, L)
+  Get the values of all state variables of unit @var{L}, returned as
+  floats in the order they are listed in @ref{Unit species} table.
+@end defun
+
+@defun reset_unit (C, M, L)
+  Reset all state variables of unit @var{L}.
+@end defun
+
+@defun get_units_matching (C, M, R)
+  Return all units with labels matching regex @var{R}.
+@end defun
+
+@defun get_units_of_type (C, M, sp)
+  Return all units of a species @var{sp}.
+@end defun
+
+@defun set_matching_neuron_parameter (C, M, R, P, V)
+  Set the value of parameter @var{P} to @var{V} in all neurons labelled
+  matching regex @var{R}.
+@end defun
+
+@defun set_matching_synapse_parameter (C, M, Rs, Rt, P, V)
+  Set the value of parameter @var{P} to @var{V} in all synapses
+  connecting, resp., any neurons labelled matching regexes @var{Rs} and
+  @var{Rt}.
+@end defun
+
+@defun revert_matching_unit_parameters (C, M, R)
+  Revert to defaults all parameters of units labelled matching regex
+  @var{R}.
+@end defun
+
+@defun decimate (C, M, R, frac)
+  Delete a random @var{frac} of all units matching regex @var{R}.
+@end defun
+
+@defun putout (C, M, R)
+  Delete all units matching regex @var{R}.
+@end defun
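+
+For instance, to depolarise all PN neurons a little (a sketch; the
+label pattern and value are illustrative):
+
+@example
+M.set_matching_neuron_parameter (C, "fafa", "PN.*", "Idc", 0.2)
+@end example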
+
+
+@node External excitation sources
+@chapter External excitation sources
+
+CNrun provides for three types of external stimulation sources:
+
+@itemize @bullet
+@item @emph{Tape},
+ with all possible values defined in sequence, with timestamps, in a
+ file, optionally looping.
+@item @emph{Periodic},
+ with a sequence of values defined to occur at regular intervals within
+ a specified period.
+@item @emph{Noise},
+ a continuous sampling from a uniform or gaussian distribution.
+@end itemize
+
+@defun new_tape_source (C, M, source_name, file_name, looping)
+  Set up a new tape source named @var{source_name}, from data in file
+  @var{file_name}.
+@end defun
+
+@defun new_periodic_source (C, M, source_name, file_name, looping, period)
+  Set up a new periodic source named @var{source_name}, from data in file
+  @var{file_name}, optionally @var{looping} over a @var{period} (stuck
+  at the last value, if not).
+@end defun
+
+@defun new_noise_source (C, M, source_name, min, max, sigma, distribution)
+  Set up a new noise source named @var{source_name}, of a given
+  @var{distribution} (possible values are @emph{"uniform"} and
+  @emph{"gaussian"}), with given @var{min}, @var{max}, and (for the
+  gaussian) @var{sigma}.
+@end defun
+
+@defun get_sources (C, M)
+  Get all sources created in the model, returning labels as strings.
+@end defun
+
+@defun connect_source (C, M, L, P, source_name)
+  Connect source @var{source_name} to parameter @var{P} of unit @var{L}.
+@end defun
+
+@defun disconnect_source (C, M, L, P, source_name)
+  Disconnect a previously connected source @var{source_name} from
+  parameter @var{P} of unit @var{L}.
+@end defun
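+
+For instance, feeding gaussian noise into a neuron's @var{Idc}
+parameter might look like this (a sketch; the unit label and source
+name are made up, and the model "fafa" is assumed to exist):
+
+@example
+M.new_noise_source (C, "fafa", "bg", -0.5, 0.5, 0.1, "gaussian")
+M.connect_source (C, "fafa", "n1", "Idc", "bg")
+@end example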
+
+
+@node Sampling state variables
+@chapter Sampling state variables
+
+In addition to direct access to unit state variables in Lua (see
+@code{get_unit_parameter()}), there are two ways to record unit state
+for offline assessment:
+
+@itemize @bullet
+
+@item Have units write their state variable(s) to logs (created in the
+current directory, named after the unit label with a ``.vars'' suffix);
+
+@item Have neurons record the times of spikes (written to files
+similarly named except with suffix ``.spikes'').
+
+@end itemize
+
+@defun start_listen (C, M, R)
+  Enable logging of state variables (with options as defined in the
+  model parameter @var{listen_mode}; see @ref{Models}) in all units
+  labelled matching regex @var{R}.  Return the count of units affected.
+@end defun
+
+@defun stop_listen (C, M, R)
+  Disable logging of state variables in all units labelled matching
+  regex @var{R}.  Units writing logs will flush and close.  Return
+  the count of units affected.
+@end defun
+
+@defun start_log_spikes (C, M, R)
+  Enable logging of spike times in all neurons labelled matching regex
+  @var{R}.  Return the count of units affected.
+@end defun
+
+@defun stop_log_spikes (C, M, R)
+  Disable logging of spikes in all units labelled matching regex @var{R}.
+  Return the count of units affected.
+@end defun
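+
+A typical logging run might thus look like this (a sketch, continuing
+the "fafa" model from earlier examples):
+
+@example
+M.set_model_parameter (C, "fafa", "listen_dt", 1)
+M.start_listen (C, "fafa", "PN.*")      -- log state vars to PN*.vars
+M.start_log_spikes (C, "fafa", "PN.*")  -- and spike times to PN*.spikes
+M.advance (C, "fafa", 5000)
+M.stop_listen (C, "fafa", "PN.*")       -- flush and close the logs
+@end example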
+
+
+@node Unit species
+@chapter Unit species
+
+Given below are some unit species available for your models.  For a
+complete list, with parameter standard values and descriptions, see the
+output of @code{dump_available_units()}.
+
+@section Neuron species
+@multitable @columnfractions .1 .25 .15 .5
+@headitem Species @tab Parameters @tab State vars @tab Description
+
+@item HH
+@tab gNa, ENa, gK, EK, gl, El, Cmem, Idc
+@tab E
+@tab A classical, conductance-based Hodgkin-Huxley neuron
+
+@item HHRate
+@tab a, I0, r, Idc
+@tab F
+@tab Rate-based model of the Hodgkin-Huxley neuron
+
+@item DotPoisson
+@tab lambda, Vrst, Vfir
+@tab E
+@tab Duration-less spike Poisson oscillator
+
+@item Poisson
+@tab lambda, trel, trel+trfr, Vrst, Vfir
+@tab E
+@tab Poisson oscillator
+
+@item VdPol
+@tab eta, omegasq
+@tab A
+@tab Van der Pol oscillator
+
+@item DotPulse
+@tab f, Vrst, Vfir
+@tab E
+@tab Dot Pulse generator
+
+@item NMap
+@tab Vspike, alpha, gamma, beta, Idc
+@tab E
+@tab Map neuron
+
+@end multitable
+
+@section Synapse species
+
+In addition to parameters listed in the table, each synapse has a
+conductance (parameter @var{gsyn}).
+
+@multitable @columnfractions .1 .25 .15 .5
+@headitem Synapse @tab Parameters @tab State vars @tab Description
+
+@item AB
+@tab Esyn, Epre, alpha, beta, trel
+@tab S
+@tab An alpha-beta synapse (Destexhe, Mainen, Sejnowsky, 1994)
+
+@item Rall
+@tab Esyn, Epre, tau
+@tab S, R
+@tab Rall synapse (Rall, 1967)
+
+@item Map
+@tab tau, delta, Vrev
+@tab S
+@tab Map synapse
+
+@end multitable
+
+
+
+@node Planned features
+@chapter Planned features
+
+Interconnections between external peer CNrun nodes, for both control
+and direct, asynchronous stimulus transduction.
+
+@node Index
+@unnumbered @code{CNrun Lua API} Function index
+
+@printindex fn
+
+@bye
diff --git a/upstream/libcnrun.pc.in b/upstream/libcnrun.pc.in
new file mode 100644
index 0000000..e3e9654
--- /dev/null
+++ b/upstream/libcnrun.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: cnrun
+Description: CNRun Library
+Version: @PACKAGE_VERSION@
+Requires: gsl xml2
+Cflags: -I${includedir}
+Libs: -L${libdir} -lcnrun
+Libs.private: -lxml2 -lgsl
diff --git a/upstream/make_vesrion b/upstream/make_version
similarity index 100%
rename from upstream/make_vesrion
rename to upstream/make_version
diff --git a/upstream/man/cnrun.1.in b/upstream/man/cnrun.1.in
deleted file mode 100644
index 510d60b..0000000
--- a/upstream/man/cnrun.1.in
+++ /dev/null
@@ -1,311 +0,0 @@
-.TH CNrun 1 "@build_date@" @VERSION@ "CNrun"
-.SH NAME
-	CNrun -- a neuronal network simulator
-.SH SYNOPSIS
-	cnrun \fB\-h\fR | \fB\-U\fR | \fB\-e\fR \fIscript\fR [\fBOPTION\fR ...]
-.B
-.PP
-
-.SH DESCRIPTION
-.PP
-\fBCNrun\fR is a neuronal network simulator, similar to NEURON or
-GENESIS, but without provision for unit compartments.  It reads the
-network topology in NeuroML format as exported, f.i., by
-neuroConstruct.  Unit types are determined by the \(oqcell_type\(cq
-attribute in the .nml definitions.
-
-Available neuron types, by the corresponding \(oqcell_type\(cq string, include:
-.IP \(bu
-\fIHH\fR and \fIHHRate\fR, conductance\- and rate\-based Hodgkin\-Huxley
-neurons (Traub & Miles, 1991);
-.IP \(bu
-A simplified but fast, fixed\-dt \fIMap\fR neurons mimicking the HH
-model;
-.IP \(bu
-\fIPoisson\fR, Van der Pol (\fIVdP\fR) and
-simple \fIPulse\fR oscillators;
-.IP \(bu
-synapses as described in Rall et al, 1967 (\fIRall\fR) and Destexhe et
-al, 1994 (\fIAB\fR).
-
-.PP
-Unit parameters can be set via a \fBset_parm_*\fR command (see \fBSCRIPTING\fR
-below); values can be set once before the simulation, or continuously
-or periodically per user\-defined schedule.
-
-A 6\-5\-order Runge\-Kutta integration method is used to compute state
-variables.  These (membrane potential E or instantaneous firing rate R
-for neurons, neurotransmitter release S for synapses) as well as spike
-times can be logged.
-
-Scripting support in CNrun includes commands for creating and
-populating a model, setting parameters for single units or groups
-selected based on regex matching.  Variables (\(oqa = 1; b = a +
-2\(cq) and arithmetic expressions (\(oq\-\(cq, \(oq+\(cq, \(oq*\(cq,
-\(oq/\(cq, \(oq()\(cq ) are supported.
-
-.SH OPTIONS
-\fB\-C\fR \fIdir\fR
-chdir to \fIdir\fR before running.
-.TP
-\fB\-D\fR
-Dump all unit types in the model and exit.
-.TP
-\fB\-e\fR [\fIscript\fR]
-Execute \fIscript\fR.  If this option is given without a file name (or
-not given at all), start an interactive interpreter.
-.TP
-\fB\-s\fR
-Sort units (mostly useful with verbose output).
-.TP
-\fB\-L\fR[1dbxL]
-For all listeners:
-.RS 4
-.IP d
-Defer writing to disk until done rather than write continuously
-(speeds up the simulation but you can\(cqt watch the progress live
-with gnuplot)
-.IP 1
-Only log the first variable (appropriate for the HH model, which units
-have in excess the three uninteresting gating parameters).
-.IP b
-Write in native binary form rather than in ASCII.  This will speed up
-viewing the (now label.varx files) with gnuplot.  Do your plotting
-with \(lqbinary format="%lf%lf"\(rq to achieve this.
-
-These options can also be set using command \fBlisten_mode\fR (which see, below).
-.IP L
-log integrator dt.
-.RE
-.TP
-\fB\-E\fR \fIdouble\fR
-Listen at this interval (default 1 msec; set to
-0 to listen every cycle, which can slow cnrun down considerably).
-Also available as command \fBlisten_dt\fR.
-.TP
-\fB\-k\fR[l|0]
-Write a model\-wide log of spiking neurons, using labels (\(oql\(cq) or unit ids (\(oq0\(cq).
-.TP
-\fB\-e \fIuint\fR
-Set precision for all output (default 8).
-.TP
-\fB\-iT\fIdouble\fR
-dt_max (default 0.5).
-.TP
-\fB\-it\fIdouble\fR
-dt_min (default 1e\-05).
-.TP
-\fB\-ix\fIdouble\fR
-Cap dt increase by current dt value x this (default 5).
-.TP
-\fB\-nc\fR
-Disable synapse coalescing (for benchmarking).
-.TP
-\fB\-v \fIint\fR
-Set verbosity level (default 1; values up to 7 are meaningful).
-Use a negative value to show the progress percentage only,
-indented on the line at \-8 x this value.
-.TP
-\fB\-U\fR
-List all available units.
-.TP
-\fB\-h\fR
-Print the overview of command\-line options.
-
-Space is optional between the option letter and argument for
-single\-letter options.  In all two\-letter options taking an argument
-though, make sure there is no space in between.
-
-.SH SCRIPTING
-Commands are delimited by a colon or new\-line.  Comments are lines
-starting with #.  The following commands are available:
-.TP
-\fBnew_model\fR NAME
-Create a new model called NAME.  Existing model is deleted.
-.TP
-\fBuse_nml\fR NML_FILE
-Load network topology from NML_FILE, creating
-a model if necessary, or replacing an existing model\(rq topology.
-.TP
-\fBmerge_nml\fR NML_FILE
-Merge in the topology from NML_FILE.
-.TP
-\fBadd_neuron\fR TYPE LABEL
-Add a new newron of type TYPE with label LABEL.
-.TP
-\fBadd_synapse\fR TYPE SOURCE TARGET G
-Connect the neuron labelled SOURCE to one labelled TARGET with a
-synapse of type TYPE, with gsyn G.
-.TP
-\fBcull_deaf_synapses\fR
-Remove synapses with zero weight.
-.TP
-\fBset_parm_neuron\fR LABEL PARM VALUE
-Set parameter PARM for a specified group of neurons labelled matching LABEL.
-.TP
-\fBset_parm_synapse\fR SRC TGT PARM VALUE
-Set parameter PARM for synapses between neurons labelled matching SRC and TGT.
-The synaptic weight, itself not being a synapse parameter, can also be set with
-this command: to do this, use \(oqgsyn\(cq as PARM.
-.TP
-\fBreset\fR
-Reset the model.  Model time is rewound to 0 and all units have their
-state variables reset to stock defaults.  Any previously assigned unit
-parameters and attached data sources are preserved.
-.TP
-\fBreset_revert_params\fR
-Reset the model.  Model time is rewound to 0, all units have their
-state variables and parameters reset to stock defaults.
-.TP
-\fBreset_state_units\fR REGEX
-Reset the units\(cq as above, keeping current model time.
-.TP
-\fBadvance_until\fR TIME
-Advance until TIME msec.
-.TP
-\fBadvance\fR TIME
-Advance TIME msec.
-.TP
-\fBputout\fR REGEX
-Delete units matching REGEX by label.
-.TP
-\fBdecimate\fR REGEX FRAC
-Randomly delete FRAC units of a population of units selected by REGEX.
-.TP
-\fBstart_listen\fR REGEX
-Make matching units listen.
-.TP
-\fBstop_listen\fR
-Make matching units stop listening.
-.TP
-\fBlisten_dt\fR [VALUE]
-Set listening interval to VALUE, or show current value if VALUE not given.
-.TP
-\fBlisten_mode\fR [SPEC]
-Print (if argument is omitted) the current listening mode (one var only, deferred write,
-and/or binary); otherwise, enable the corresponding mode if \(oq1\(cq, \(oqd\(cq or \(oqb\(cq
-occurs in SPEC, or disable it if it does and is immediately followed by a \(oq\-\(cq.
-Note that those units already listening will be unaffected; to change the mode for them, issue
-\fBstart_listen\fR for them after the new mode has been set.
-.TP
-\fBstart_log_spikes\fR REGEX
-Make neurons matching REGEX log spikes.
-.TP
-\fBstop_log_spikes\fR REGEX
-Make neurons matching REGEX stop log spikes.
-.TP
-\fBsxf_params\fR DELAY:PERIOD:SIGMA
-Set spike density function initial delay, sampling period and sigma as specified.
-.TP
-\fBdescribe_model\fR
-Print a summary of model topology and unit types.
-.TP
-\fBshow_units\fR REGEX
-Print parameters and state of units matching REGEX.
-.TP
-\fBnew_source\fR TYPE ID ARG ...
-Create a new source of type and with an id as indicated.  Sources can be connected to unit
-parameters as a means to set up a dynamically changing behaviour.  See \fBDYNAMIC SOURCES\fR below.
-.TP
-\fBconnect_source\fR SOURCE_ID LABEL PARM
-Connect this source to matching units\(cq parameter.
-.TP
-\fBshow_sources\fR
-Show the currently active sources (both connected and idle).
-.TP
-\fBexec\fR [SCRIPT]
-Execute a script.  If SCRIPT not specified, start an interactive interpreter.
-.TP
-\fBverbosity\fR [LEVEL]
-Set/show verbosity level.
-.TP
-\fBshow_vars\fR [REGEX]
-Print variables matching REGEX, or all variables if REGEX omitted.
-.TP
-\fBclear_vars\fR [REGEX]
-Clear variables matching REGEX, or all if REGEX omitted.
-.TP
-\fBpause\fR [DELAY]
-Pause for DELAY sec if specified, or until user presses Enter otherwise.
-.TP
-\fBquit\fR
-Exit current interpreter if called by \fBexec\fR; exit the program otherwise.
-
-.RE
-When you use the interpreter interactively, TAB will list completions
-approproiately, depending on the context.
-
-
-.SH DYNAMIC SOURCES
-In addition to static unit parameter/variable assignment with
-\fBset_parm_{neuron,synapse}\fR, units can have a data source attached
-to any of their parameters or variable (even though variables will get
-overwritten in the next cycle).
-
-Data sources are of three types (a fourth one is available for
-developers, an arbitrary user function of time, but not exposed as an
-interpreter command).  Where data for a source are read from a file,
-values are read using a \(oq>>\(cq operator (from <ifstream>) into a
-double variable.  The corresponding \fBnew_source\fR arguments are:
-
-.TP
-\fBTape\fR FILE
-Read \(lqtime value\(rq pairs from FILE and set the parameter\(cqs value accordingly.
-.TP
-\fBPeriodic\fR FILE
-FILE is expected to contain, as the first number value read
-by scanf("%lg"), a time period at which the following values are
-sequentially assigned to the parameter.  Values are assigned at the
-beginning of each integration cycle.
-.TP
-\fBNoise\fR MIN:MAX
-Generate (irrespective of time) a uniformly distributed random number within MIN:MAX.
-
-.RE
-Similarly to the parameters, state variables can also be set in this
-manner; in this case, the values read, will override whatever the
-inner workings of the unit assign to it.  Where a Tape has a gap
-between assignment times larger than current dt, assignments are still
-made; this, however, does not apply to Periodic sources (chiefly for
-performance reasons).
-
-.SH SYNAPSE COALESCING
-Coalesced synapses are those having identical parameters and having
-the same source.  Coalescing reduces, per divergence rate, the number
-of times the S variable is recomputed with identical parameters per
-cycle; additionally for hosted synapses, the integration vector is
-shrunk to fit towards further performance gain.
-
-Coalescing happens automatically between two synapses from same source
-when, after all parameter assignments, they are found to be identical
-(disregarding synaptic weights).  Conversely, when the user changes a
-parameter to one coalesced synapses that is different from that
-parameter\(cqs value in the others, that synapse becomes independent.
-
-Note that a synapse units\(cqs label is dynamically formed of the
-label of the source with a semicolon and the current number of
-targets.  Another consequence of coalescing is that there can be more
-than one synapse units labelled identically (hence, uniquely to
-identify a synapse, you need to specify its source and target).
-
-The command\-line option \fB\-nc\fR can be used to disable coalescing.
-
-.SH FILES
-.TP
-\fI.cnrun\-history\fR, a per\-directory history of entered commands.
-
-.SH EXAMPLE
-In @docdir@/ratiocoding, there is a working example of cnrun
-setup which reproduces some of the results presented in Zavada et al
-(2011) PLoS paper.
-
-.SH BUGS
-The oscillator units other than Poisson, have not been tested.
-
-.SH SEE ALSO
-spike2sdf(1), varfold(1).
-
-.SH AUTHOR
-CNRun and the underlying library libcn is written by Andrei Zavada
-<johnhommer at gmail.com>, building on the original code by Thomas
-Nowotny, while at Sussex University in 2008\-10.
diff --git a/upstream/man/varfold.1.in b/upstream/man/varfold.1.in
deleted file mode 100644
index a0a804c..0000000
--- a/upstream/man/varfold.1.in
+++ /dev/null
@@ -1,129 +0,0 @@
-.TH varfold 1 "@build_date@" @VERSION@ "CNrun"
-.SH NAME
-	varfold -- a simple numerical matrix convolution tool
-.SH SYNOPSIS
-	varfold \fB\-h\fR | [\fBOPTION\fR ...] \fBfilename_or_unitlabel\fR ...
-.B
-.PP
-
-.SH DESCRIPTION
-.PP
-Varfold is a simple tool to obtain a measure of fitting of a matrix to
-another, reference matrix, by means of \(oqconvoluting\(cq the former
-against the latter to produce a scalar value (Cost Function).
-
-The data are expected to be timestamped (time in the first column
-followed by data in a record).  Use \fB\-t\-\fR if your data are not.
-
-Varfold can also extract data by sampling the trace at specified
-intervals from a .var output from a CNrun simulation.
-
-In a typical usage with CNrun, you have spiking data of a trial saved
-as a vector of SDF (spike density function) values, along with SHF
-(spike heterogeneity function, which is
-SDF/stdev(\fIintervals_between_spikes\fR)), and the spike count per
-sampling window from a simulation where you have enabled some
-spikeloggers.  Those data are available, per unit, in .sxf files.  You
-then create a similar, reference vector of the same size.  Varfold
-will read these vectors and, for each, apply an element\-by\-element
-operation (currently, sum of differences squared or weighting, see
-option \fB\-V\fR) to it vs the reference vector, thus yielding
-individual scalar CF values.
-
-Individual CF value(s) will be saved in files ending in \(oq.CF\(cq.
-If data from more then one unit are combined (option \fB\-z\fR), a
-grand CF will be taken by convoluting the individual matrix sum,
-average or product against the reference matrix read from a file
-specified with option \fB\-T\fR, and written into a file named
-\(oqAVERAGE.CF\(cq, \(oqSUM.CF\(cq or \(oqPRODUCT.CF\(cq.
-
-If no convolution is desired, varfold can be useful to \(oqfold\(cq
-the data vectors into matrices (only for two\-dim matrices, though).
-See option \fB\-o\fR.
-
-NaN and inf data are allowed in input vectors.
-
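-.PP
-A typical invocation (file and directory names here are illustrative)
-which convolutes the .sxf traces of two units against per\-unit
-reference vectors found in \fIref/\fR, and combines the results into
-a grand average CF:
-.PP
-.nf
-    varfold \-G ref \-Vsqdiff \-zavg \-T grand.target unit1 unit2
-.fi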
-.SH OPTIONS
-.TP
-\fB\-C\fR \fIdir\fR
-chdir to \fIdir\fR before running.
-.TP
-\fB\-G\fR \fIdir\fR
-Look for reference files in \fIdir\fR rather than in the current
-directory.  These should match what will eventually be determined as a
-unit\(cqs data vector file name, plus a \(oq.target\(cq suffix.
-.TP
-\fB\-x\fR \fIn\fR
-A dimension size.  If your vector is a serialised (multidimensional) matrix,
-repeat this option as needed.  Only useful in conjunction with option \fB\-o\fR
-(which see).
-.TP
-\fB\-V\fRsqdiff|weight
-Operation to apply on trial vs reference vectors to produce the cost
-function: a sum of squared differences, or a sum of trial vector
-elements multiplied by corresponding elements from the reference
-vector.
-.TP
-\fB\-z\fRsum|avg|prod
-Operation applied on individual matrices to produce an overall matrix.
-.TP
-\fB\-T\fR \fIfname\fR
-Read the overall reference matrix from this file.
-.TP
-\fB\-R\fR
-Sample trial vector data from a .var file rather than .sxf.  With
-this option, you may use option \fB\-f\fR, and must use option \fB\-d\fR.
-.TP
-\fB\-f \fIn\fR:\fIm\fR
-Extract the \fIn\fRth field of \fIm\fR consecutive fields per datum;
-if omitted, sample every datapoint.
-.TP
-\fB\-d \fIf\fR:\fIp\fR:\fIw\fR
-Sample from time \fIf\fR at period \fIp\fR with window size \fIw\fR.
-.TP
-\fB\-F \fIn\fR
-Read vector data from position \fIn\fR (does not apply to the .var
-case, where you would specify \fB\-d\fIf\fR other than 0).
-.TP
-\fB\-H\fR
-Multiply SDF value (in the second column in a .sxf file) by SHF (the third column).
-.TP
-\fB\-H\-\fR
-Assume there is no SHF column in your files.  Use this option with files generated by spike2sdf
-(in this case, files will have an .sdf suffix, not .sxf).
-.TP
-\fB\-t\-\fR
-Assume there is no timestamp in the data vector (does not apply to
-data sampled from .var files).  Implies \fB\-H\-\fR.
-.TP
-\fB\-N\fR
-Normalise input matrix before convolution.
-.TP
-\fB\-o\fR [mc]
-Fold vectors and output data as a matrix (m) and/or a list of <x y value> records (c);
-only valid for two\-dimensional data.
-.TP
-\fB\-O\fR
-Write Octave\-compatible designations of nan and inf data (i.e.,
-\(oqNaN\(cq and \(oqInf\(cq).
-.TP
-\fB\-h\fR
-Print the overview of command\-line options.
-.TP
-\fIunit_label\fR or \fIfilename\fR
-
-Each non\-option argument is treated as a single data vector file
-name, or as the label of a unit in the trial (with file names
-\(oq\fIunit_label\fR.s{x,d}f\(cq).  If a convolution is requested
-(with option \fB\-V\fR), the reference vector is read from a file
-named as the data vector file, suffixed with \(oq.target\(cq, in the
-directory specified with \fB\-G\fR.
-
-Files named \fIlabel\fR are tried first, failing which varfold will
-try \fIlabel\fR.sxf, and eventually \fIlabel\fR.sdf as if with option
-\fB\-H\-\fR enabled.
-
-.SH SEE ALSO
-cnrun(1), spike2sdf(1).
-
-.SH AUTHOR
-Andrei Zavada (johnhommer at gmail.com).
diff --git a/upstream/src/Common.mk b/upstream/src/Common.mk
index e0287d8..05741fc 100644
--- a/upstream/src/Common.mk
+++ b/upstream/src/Common.mk
@@ -1,10 +1,4 @@
-%.hh.gch: %.hh
-# for some reason $(CXXCOMPILE) is just... "c", whereas when seen in
-# any sub/Makefile.am, it does the trick alright, so spell it out in full
-	$(CXX) $(AM_CXXFLAGS) -c $<
-
 AM_CXXFLAGS := -Wall -std=c++0x -fno-rtti \
 	-I$(top_srcdir) -I$(top_srcdir)/src \
 	$(LIBCN_CFLAGS) $(OPENMP_CXXFLAGS) \
-	-DHAVE_CONFIG_H \
-	-DBUILT_BY=\"@user@\"
+	-DHAVE_CONFIG_H
diff --git a/upstream/src/Makefile.am b/upstream/src/Makefile.am
index d09bdcd..8f66064 100644
--- a/upstream/src/Makefile.am
+++ b/upstream/src/Makefile.am
@@ -1,18 +1,9 @@
 include $(top_srcdir)/src/Common.mk
 
-SUBDIRS = . libstilton libcn cnrun
+SUBDIRS = libstilton libcnrun lua-cnrun
 if DO_TOOLS
 SUBDIRS += tools
 endif
 
-libicing_a_CXXFLAGS := \
-	$(AM_CXXFLAGS) -DGIT_DESCRIBE_TAGS=\"$(shell git describe --tags)\"
-
-noinst_LIBRARIES := \
-	libicing.a
-
-libicing_a_SOURCES := \
-	print_version.cc
-
-print_version.o: FORCE
-FORCE:
+install-exec-hook:
+	rm -f "$(DESTDIR)$(pkglibdir)"/lib*/*.la
diff --git a/upstream/src/cnrun/Makefile.am b/upstream/src/cnrun/Makefile.am
deleted file mode 100644
index 148bf1a..0000000
--- a/upstream/src/cnrun/Makefile.am
+++ /dev/null
@@ -1,20 +0,0 @@
-include $(top_srcdir)/src/Common.mk
-
-if DO_PCH
-BUILT_SOURCES = \
-	cnrun.hh.gch
-
-CLEANFILES = $(BUILT_SOURCES)
-endif
-
-bin_PROGRAMS = \
-	cnrun
-cnrun_SOURCES = \
-	main.cc interpreter.cc completions.cc cnrun.hh
-cnrun_LDADD = \
-	$(top_srcdir)/src/libicing.a \
-	$(top_srcdir)/src/libcn/libcn.la \
-	$(top_srcdir)/src/libstilton/libstilton.la \
-	$(LIBCN_LIBS)
-cnrun_LDFLAGS = \
-	-shared
diff --git a/upstream/src/cnrun/completions.cc b/upstream/src/cnrun/completions.cc
deleted file mode 100644
index 3609ee1..0000000
--- a/upstream/src/cnrun/completions.cc
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2010-02-12
- *
- * CNModel runner (interpreter)
- */
-
-
-#include <stdio.h>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#ifdef HAVE_LIBREADLINE
-#  if defined(HAVE_READLINE_READLINE_H)
-#    include <readline/readline.h>
-#  elif defined(HAVE_READLINE_H)
-#    include <readline.h>
-#  endif
-#endif
-
-#ifdef HAVE_READLINE_HISTORY
-#  if defined(HAVE_READLINE_HISTORY_H)
-#    include <readline/history.h>
-#  elif defined(HAVE_HISTORY_H)
-#    include <history.h>
-#  endif
-#endif
-
-#include "runner.hh"
-#include "libcn/model.hh"
-
-using namespace std;
-using namespace cnrun;
-
-
-
-static char*
-cnrun_null_generator( const char* text, int state)
-{
-	return nullptr;
-}
-
-
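-// The generators below all follow the readline completion-generator
-// protocol: readline calls a generator repeatedly within one completion
-// attempt, with state == 0 on the first call (reset the iterator there)
-// and state > 0 afterwards; each call returns a malloc'd copy of the
-// next candidate matching `text', or nullptr when the list is exhausted.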
-static char*
-cnrun_cmd_generator( const char* text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-
-        while ( (name = cnrun_cmd[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-static char*
-cnrun_source_types_generator( const char* text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-
-        while ( (name = __SourceTypes[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-
-
-
-
-static char*
-cnrun_neu_type_generator( const char *text, int state)
-{
-	static const char** neuron_types = nullptr;
-	if ( !neuron_types ) {
-		if ( !(neuron_types = (const char**)malloc( (NT_LAST - NT_FIRST+1+1)*sizeof(char*))) )
-			abort();
-		size_t n;
-		for ( n = 0; n <= NT_LAST - NT_FIRST; n++ )
-			neuron_types[n] = strdup( __CNUDT[NT_FIRST+n].species);  // family would do just as well
-		neuron_types[n] = nullptr;
-	}
-
-	static int list_index, len;
-        const char *name;
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = neuron_types[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-static char*
-cnrun_syn_type_generator( const char *text, int state)
-{
-	static const char** synapse_types = nullptr;
-	if ( !synapse_types ) {
-		if ( !(synapse_types = (const char**)malloc( (YT_LAST - YT_FIRST+1+1)*sizeof(char*))) )
-			abort();
-		size_t n, i;
-		for ( n = i = 0; n <= YT_LAST - YT_FIRST; n++ )
-			synapse_types[i++] = strdup( __CNUDT[YT_FIRST+n].family);
-		// there are fewer families than species, so we are wasting some tens of bytes here.  oh well.
-		synapse_types[i] = nullptr;
-	}
-
-	static int list_index, len;
-        const char *name;
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = synapse_types[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-
-
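-// Completion candidates are cached in static arrays.  Commands that
-// mutate the model, the shell variables or the sources set the
-// corresponding regenerate_* flag, so the next completion attempt
-// rebuilds its cache lazily.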
-bool cnrun::regenerate_unit_labels = true;
-
-#define GENERATE_NEURONS  1
-#define GENERATE_SYNAPSES 2
-static int restrict_generated_set = 0;
-
-static char*
-cnrun_unit_label_generator( const char *text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-	static char** unit_labels = nullptr;
-
-	if ( regenerate_unit_labels ) {
-		regenerate_unit_labels = false;
-
-		if ( !Model ) {
-			free( unit_labels);
-			unit_labels = nullptr;
-			return nullptr;
-		}
-
-		if ( !(unit_labels = (char**)realloc( unit_labels, (Model->units()+1) * sizeof(char*))) )
-			abort();
-		size_t n = 0;
-		for_model_units (Model, U)
-			if ( ((restrict_generated_set & GENERATE_NEURONS) && (*U)->is_neuron()) ||
-			     ((restrict_generated_set & GENERATE_SYNAPSES) && (*U)->is_synapse()) )
-				unit_labels[n++] = strdup( (*U) -> label());
-		unit_labels[n] = nullptr;
-	}
-
-	if ( !unit_labels )
-		return nullptr;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = unit_labels[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-bool cnrun::regenerate_var_names = true;
-
-static char*
-cnrun_var_names_generator( const char *text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-	static char** var_names = nullptr;
-
-	if ( regenerate_var_names ) {
-		regenerate_var_names = false;
-
-		if ( current_shell_variables->size() == 0 )
-			return nullptr;
-
-		if ( !(var_names = (char**)realloc( var_names, (current_shell_variables->size()+1) * sizeof(char*))) )
-			abort();
-		size_t n = 0;
-		for ( auto &v : *current_shell_variables )
-			var_names[n++] = strdup( v.name);
-		var_names[n] = nullptr;
-	}
-
-	if ( !var_names )
-		return nullptr;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = var_names[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-
-
-bool cnrun::regenerate_source_ids = true;
-
-static char*
-cnrun_source_id_generator( const char *text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-	static char** source_ids = nullptr;
-
-	if ( regenerate_source_ids ) {
-		regenerate_source_ids = false;
-
-		if ( !Model || Model->Sources.size() == 0 )
-			return nullptr;
-
-		if ( !(source_ids = (char**)realloc( source_ids, (Model->Sources.size()+1) * sizeof(char*))) )
-			abort();
-		size_t n = 0;
-		for ( auto &v : Model->Sources )
-			source_ids[n++] = strdup( v->name.c_str());
-		source_ids[n] = nullptr;
-	}
-
-	if ( !source_ids )
-		return nullptr;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = source_ids[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-
-static char **parm_names = nullptr;
-static char *unit_label_completing_for = nullptr;
-static char *synapse_target_label_completing_for = nullptr;
-
-static char*
-cnrun_parm_names_generator( const char *text, int state)
-{
-	static int list_index, len;
-        const char *name;
-
-	if ( !Model )
-		return nullptr;
-	C_BaseSynapse *y;
-	TUnitType t;
-	C_BaseUnit *u1, *u2;
-	if ( synapse_target_label_completing_for )
-		if ( (u1 = Model->unit_by_label( unit_label_completing_for)) && u1->is_neuron() &&
-		     (u2 = Model->unit_by_label( synapse_target_label_completing_for)) && u2->is_neuron() &&
-		     (y = (static_cast<C_BaseNeuron*>(u1)) -> connects_via( *static_cast<C_BaseNeuron*>(u2))) )
-			t = y->type();
-		else
-			return nullptr;
-	else {
-		// guard against a stale or mistyped label: unit_by_label() may return null
-		C_BaseUnit *u = Model -> unit_by_label( unit_label_completing_for);
-		if ( !u )
-			return nullptr;
-		t = u -> type();
-	}
-	if ( t == NT_VOID )
-		return nullptr;
-
-	if ( !(parm_names = (char**)realloc( parm_names, (__CNUDT[t].pno+1) * sizeof(char*))) )
-		abort();
-	size_t n, p;
-	for ( n = p = 0; p < __CNUDT[t].pno; p++ )
-		if ( __cn_verbosely > 5 || __CNUDT[t].stock_param_syms[p][0] != '.' )
-			parm_names[n++] = strdup( __CNUDT[t].stock_param_syms[p]);
-	parm_names[n] = nullptr;
-
-	if ( !parm_names )
-		return nullptr;
-
-        if ( !state ) {
-		list_index = 0;
-		len = strlen( text);
-        }
-        while ( (name = parm_names[list_index]) ) {
-		list_index++;
-		if ( strncmp( name, text, len) == 0 )
-			return strdup( name);
-        }
-        return nullptr;
-}
-
-
-
-
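-// Returns the index of the word the cursor is in, by counting runs of
-// whitespace in rl_line_buffer before rl_point (0 = the command word).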
-static int
-rl_point_at_word() __attribute__ ((pure));
-
-static int
-rl_point_at_word()
-{
-	int p = 0, delims = 0;
-	while ( p < rl_point ) {
-		if ( isspace(rl_line_buffer[p]) ) {
-			delims++;
-			do p++;
-			while ( p < rl_point && isspace(rl_line_buffer[p]) );
-		}
-		p++;
-	}
-	return delims;
-}
-
-
-
-char**
-cnrun::
-cnrun_completion( const char *text, int start, int end)
-{
-	if ( start == 0 )
-		return rl_completion_matches( text, &cnrun_cmd_generator);
-
-	char	*line_buffer = strdupa( rl_line_buffer),
-	 	*cmd = strtok( line_buffer, " \t");
-
-	if ( strcmp( cmd, cnrun_cmd[CNCMD_add_neuron]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:   return rl_completion_matches( text, &cnrun_neu_type_generator);
-		default:  return rl_completion_matches( text, &cnrun_null_generator);
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_add_synapse]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:  return rl_completion_matches( text, &cnrun_syn_type_generator);
-		case 2:
-		case 3:  return (restrict_generated_set = 0|GENERATE_NEURONS,
-				 rl_completion_matches( text, &cnrun_unit_label_generator));
-		default: return rl_completion_matches( text, &cnrun_null_generator);
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_load_nml]) == 0 ) {
-		return nullptr; // use built-in filename completion
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_show_units]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_decimate]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_start_listen]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_stop_listen]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_start_log_spikes]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_stop_log_spikes]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_putout]) == 0 ) {
-		return (rl_point_at_word() == 1) ? (restrict_generated_set = 0|GENERATE_NEURONS|GENERATE_SYNAPSES,
-						    rl_completion_matches( text, &cnrun_unit_label_generator)) : nullptr;
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_show_vars])  == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_clear_vars]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_listen_dt])  == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_min]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_max]) == 0 ||
-		    strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_cap]) == 0 ) {
-		return (rl_point_at_word() == 1) ? rl_completion_matches( text, cnrun_var_names_generator) : nullptr;
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_set_parm_neuron]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:	restrict_generated_set = 0|GENERATE_NEURONS;
-			return rl_completion_matches( text, cnrun_unit_label_generator);
-		case 2:	unit_label_completing_for = strtok( nullptr, " ");
-			synapse_target_label_completing_for = nullptr;
-			return rl_completion_matches( text, cnrun_parm_names_generator);
-		default: return rl_completion_matches( text, cnrun_var_names_generator);
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_set_parm_synapse]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:
-		case 2:	restrict_generated_set = 0|GENERATE_NEURONS;
-			return rl_completion_matches( text, cnrun_unit_label_generator);
-		case 3:	unit_label_completing_for = strtok( nullptr, " ");
-			synapse_target_label_completing_for = strtok( nullptr, " ");
-			return rl_completion_matches( text, cnrun_parm_names_generator);
-		default: return rl_completion_matches( text, cnrun_var_names_generator);
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_connect_source]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:	return rl_completion_matches( text, &cnrun_source_id_generator);
-		case 2:	restrict_generated_set = 0|GENERATE_NEURONS|GENERATE_SYNAPSES;
-			return rl_completion_matches( text, &cnrun_unit_label_generator);
-		case 3:	unit_label_completing_for = (strtok( nullptr, " "), strtok( nullptr, " "));
-			synapse_target_label_completing_for = nullptr;
-			return rl_completion_matches( text, cnrun_parm_names_generator);
-		default: return rl_completion_matches( text, cnrun_null_generator);
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_new_source]) == 0 ) {
-		switch ( rl_point_at_word() ) {
-		case 1:  return rl_completion_matches( text, cnrun_source_types_generator);
-		default: return rl_completion_matches( text, cnrun_null_generator);
-		}
-
-	} else {
-		return nullptr;
-	}
-}
-
-
-// EOF
diff --git a/upstream/src/cnrun/interpreter.cc b/upstream/src/cnrun/interpreter.cc
deleted file mode 100644
index 9aaa464..0000000
--- a/upstream/src/cnrun/interpreter.cc
+++ /dev/null
@@ -1,923 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2010-02-12
- *
- * CNModel runner (interpreter)
- */
-
-
-#include <sys/stat.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <cassert>
-#include <regex.h>
-#include <list>
-#include <initializer_list>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#if defined(HAVE_READLINE_READLINE_H)
-#  include <readline/readline.h>
-#elif defined(HAVE_READLINE_H)
-#  include <readline.h>
-#endif
-
-#if defined(HAVE_READLINE_HISTORY_H)
-#  include <readline/history.h>
-#elif defined(HAVE_HISTORY_H)
-#  include <history.h>
-#endif
-
-#include "libstilton/string.hh"
-#include "runner.hh"
-#include "libstilton/exprparser.hh"
-#include "libcn/integrate-rk65.hh"
-#include "libcn/base-unit.hh"
-
-using namespace std;
-using namespace cnrun;
-
-const char* const cnrun::cnrun_cmd[] = {
-	"new_model",
-	"load_nml",
-	"merge_nml",
-	"add_neuron",
-	"add_synapse",
-	"reset",
-	"reset_revert_params",
-	"reset_state_units",
-	"advance_until",
-	"advance",
-	"putout",
-	"decimate",
-	"start_listen",
-	"stop_listen",
-	"listen_dt",
-	"listen_mode",
-	"integration_dt_min",
-	"integration_dt_max",
-	"integration_dt_cap",
-	"start_log_spikes",
-	"stop_log_spikes",
-	"sxf_params",
-	"new_source",
-	"show_sources",
-	"connect_source",
-	"disconnect_source",
-	"set_parm_neuron",
-	"set_parm_synapse",
-	"cull_deaf_synapses",
-	"describe_model",
-	"show_units",
-	"exec",
-	"verbosity",
-	"show_vars",
-	"clear_vars",
-	"pause",
-	"quit",
-	nullptr
-};
-
-
-
-list<SVariable> *cnrun::current_shell_variables;
-
-
-
-namespace {
-
-void
-report_script_issue( const char *fname, int lineno, int vrb, const char* fmt, ...)
-{
-	va_list ap;
-	va_start (ap, fmt);
-	string body = str::svasprintf( fmt, ap);
-	va_end (ap);
-
-	string pre = ( lineno > 0 )
-		? str::sasprintf( "%s:%d: %s", fname, lineno, body.c_str())
-		: str::sasprintf( "%s: %s", fname, body.c_str());
-	// actually emit the assembled message (it was built but never printed)
-	lprintf( vrb, "%s\n", pre.c_str());
-}
-
-int do_single_cmd( const char*,
-		   list<SVariable> &varlist,
-		   int level = 0, const char *fname = "",
-		   unsigned lineno = -1);
-
-
-#define CN_INTERP_EXIT		 1
-#define CN_INTERP_WARN		-1
-#define CN_INTERP_PARSEERROR	-2
-#define CN_INTERP_SYSFAIL	-3
-
-#define CNRUN_HISTORY ".cnrun-history"
-
-
-
-
-int
-new_model( const char *model_name, const char *fname, unsigned lineno)
-{
-	if ( !(Model = new CModel( model_name,
-				   new CIntegrateRK65( Options.integration_dt_min,
-						       Options.integration_dt_max,
-						       Options.integration_dt_max_cap),
-				   0 |
-				   (Options.log_dt ? CN_MDL_LOGDT : 0) |
-				   (Options.log_spikers ? CN_MDL_LOGSPIKERS : 0) |
-				   (Options.sort_units ? CN_MDL_SORTUNITS : 0) |
-				   (Options.log_spikers_use_serial_id ? CN_MDL_LOGUSINGID : 0) |
-				   (Options.display_progress_percent ? CN_MDL_DISPLAY_PROGRESS_PERCENT : 0) |
-				   (Options.display_progress_time ? CN_MDL_DISPLAY_PROGRESS_TIME : 0) |
-				   (Options.dont_coalesce ? CN_MDL_DONT_COALESCE : 0))) ) {
-		report_script_issue( fname, lineno, -1, "Failed to create model");
-		return CN_INTERP_SYSFAIL;
-	}
-
-	Model->verbosely = Options.verbosely;
-	Model->listen_dt = Options.listen_dt;
-	Model->spike_threshold = Options.spike_threshold /*,  Model->spike_lapse = Options.spike_lapse */;
-	lprintf( 3,
-		 "generator type: %s\n"
-		 "         seed = %lu\n"
-		 "  first value = %lu\n",
-		 gsl_rng_name (Model->_rng), gsl_rng_default_seed, gsl_rng_get (Model->_rng));
-
-	return 0;
-}
-
-
-
-
-
-
-int
-do_single_cmd( const char* raw,
-	       list<SVariable> &varlist,
-	       int level, const char *fname, unsigned lineno)
-{
-	string	raw_s( raw);
-	char	*cmd = strtok( &raw_s[0], " \t"),
-		*operand = strtok( nullptr, "\n");
-
-	CExpression expr;
-	double result;
-
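-// Most commands operate on a live model; this macro factors out the
-// shared guard so each command branch can bail out early with a warning.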
-#define CHECK_MODEL \
-	if ( !Model ) {							\
-		report_script_issue( fname, lineno, -1, "No model loaded");		\
-		return CN_INTERP_WARN;					\
-	}
-
-
-	if ( strcmp( cmd, cnrun_cmd[CNCMD_new_model]) == 0 ) {
-		if ( !operand ) {
-			report_script_issue( fname, lineno, -1, "Missing a name for the new model");
-			return CN_INTERP_PARSEERROR;
-		}
-		delete Model;
-
-		regenerate_unit_labels = true;
-		return new_model( operand, fname, lineno);
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_load_nml]) == 0 ) {
-		struct stat s;
-		if ( stat( operand, &s) ) {
-			report_script_issue( fname, lineno, -1, "No such file: \"%s\"", operand);
-			return CN_INTERP_SYSFAIL;
-		}
-
-		int retval = new_model( operand, fname, lineno);
-		if ( retval )
-			return retval;
-
-		if ( Model->import_NetworkML( operand, false) < 0 ) {
-			report_script_issue( fname, lineno, -1, "Failed to create model topology from \"%s\"", operand);
-			delete Model;
-			Model = nullptr;
-			return CN_INTERP_SYSFAIL;
-		}
-
-		Model -> cull_blind_synapses();
-		regenerate_unit_labels = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_merge_nml]) == 0 ) {
-		CHECK_MODEL;
-		struct stat s;
-		if ( stat( operand, &s) ) {
-			report_script_issue( fname, lineno, -1, "No such file: \"%s\"", operand);
-			return CN_INTERP_SYSFAIL;
-		}
-		if ( Model->import_NetworkML( operand, true) < 0 ) {
-			report_script_issue( fname, lineno, -1, "Failed to import topology from \"%s\"", operand);
-			return CN_INTERP_SYSFAIL;
-		}
-
-		regenerate_unit_labels = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_add_neuron]) == 0 ) {
-		CHECK_MODEL;
-		char *type_s, *label_s;
-		if ( !operand ||
-		     !(type_s = (strtok( operand, " \t"))) ||
-		     !(label_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing neuron type and/or label in `add_neuron'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( !Model->add_neuron_species( type_s, label_s, true) ) {
-			report_script_issue( fname, lineno, -1, "`add_neuron' failed");
-			return CN_INTERP_PARSEERROR;
-		}
-		regenerate_unit_labels = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_add_synapse]) == 0 ) {
-		CHECK_MODEL;
-		char *type_s, *src_s, *tgt_s, *g_s;
-		if ( !operand ||
-		     !(type_s = (strtok( operand, " \t"))) ||
-		     !(src_s = strtok( nullptr, " \t")) ||
-		     !(tgt_s = strtok( nullptr, " \t")) ||
-		     !(g_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing synapse type, source or target label, and/or gsyn in `add_synapse'");
-			return CN_INTERP_PARSEERROR;
-		}
-		double g;
-		if ( expr( g_s, g, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Bad value for gsyn in `add_synapse'");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		if ( !Model->add_synapse_species( type_s, src_s, tgt_s, g, true, true) ) {
-			report_script_issue( fname, lineno, -1, "`add_synapse' failed (reason given above)");
-			return CN_INTERP_SYSFAIL;
-		}
-		regenerate_unit_labels = true;
-
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_reset]) == 0 ) {
-		CHECK_MODEL;
-		Model->reset();
-		lprintf( 0, "Reset model and state of all units");
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_reset_revert_params]) == 0 ) {
-		CHECK_MODEL;
-		Model->reset( true);
-		lprintf( 0, "Reset model and reverted all units' state and parameters");
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_reset_state_units]) == 0 ) {
-		CHECK_MODEL;
-		if ( !operand )
-			operand = const_cast<char*>(".*");
-		regex_t RE;
-		if ( 0 != regcomp( &RE, operand, REG_EXTENDED | REG_NOSUB) ) {
-			report_script_issue( fname, lineno, -1, "Invalid regexp for `reset_state_units' arg");
-			return CN_INTERP_PARSEERROR;
-		}
-		size_t cnt = 0;
-		for_model_units (Model,U)
-			if ( regexec( &RE, (*U)->label(), 0, 0, 0) == 0 ) {
-				(*U) -> reset_state();
-				++cnt;
-			}
-		if ( cnt )
-			lprintf( 0, "Reset %zd unit(s)", cnt);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_advance_until]) == 0 ) {
-		CHECK_MODEL;
-		expr.silent = true;
-		if ( !operand || expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "No or bad time value for `advance_until'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( Model->model_time() > result ) {
-			report_script_issue( fname, lineno, 0, "Cannot go back in time (now is %g)", Model->model_time());
-			return CN_INTERP_WARN;
-		}
-
-		Model -> advance( result - Model->model_time());
-		for_model_spikelogging_neurons (Model,N)
-			(*N)->sync_spikelogging_history();
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_advance]) == 0 ) {
-		CHECK_MODEL;
-		if ( !operand || expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "No or bad time value for `advance'");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		Model -> advance( result);
-		for_model_spikelogging_neurons (Model,N)
-			(*N)->sync_spikelogging_history();
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_putout]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing label in `putout'");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		list<CModel::STagGroup> tags;
-		tags.push_back( CModel::STagGroup (label_s));
-		Model->process_putout_tags( tags);
-
-		regenerate_unit_labels = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_decimate]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s, *frac_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ||
-		     !(frac_s = (strtok( nullptr, "\n"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing fraction or label in `decimate'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( expr( frac_s, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for decimation fraction: \"%s\"", operand);
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( result < 0. || result > 1. ) {
-			report_script_issue( fname, lineno, -1, "Decimation fraction outside [0..1]");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		list<CModel::STagGroupDecimate> tags;
-		tags.push_back( CModel::STagGroupDecimate( label_s, result));
-		Model -> process_decimate_tags( tags);
-
-		regenerate_unit_labels = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_start_listen]) == 0 ) {
-		CHECK_MODEL;
-		if ( !operand ||
-		     !(operand = (strtok( operand, " \t")) ) ) {
-			report_script_issue( fname, lineno, -1, "Missing label in `start_listen'");
-			return CN_INTERP_PARSEERROR;
-		}
-		list<CModel::STagGroupListener> tags;
-		tags.push_back( CModel::STagGroupListener (operand, true, 0
-					      | (Options.listen_1varonly ? CN_ULISTENING_1VARONLY : 0)
-					      | (Options.listen_deferwrite ? CN_ULISTENING_DEFERWRITE : 0)
-					      | (Options.listen_binary ? CN_ULISTENING_BINARY : CN_ULISTENING_DISK)));
-		Model->process_listener_tags( tags);
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_stop_listen]) == 0 ) {
-		CHECK_MODEL;
-		if ( !operand ||
-		     !(operand = (strtok( operand, " \t"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing label in `stop_listen'");
-			return CN_INTERP_PARSEERROR;
-		}
-		list<CModel::STagGroupListener> tags;
-		tags.push_back( CModel::STagGroupListener (operand, false));
-		Model->process_listener_tags( tags);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_listen_dt]) == 0 ) {
-		if ( !operand ) {
-			lprintf( 0, "listen_dt is %g", Options.listen_dt);
-			return 0;
-		}
-		if ( expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for value in `listen_dt'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( Model )
-			Model->listen_dt = Options.listen_dt = result;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_listen_mode]) == 0 ) {
-		if ( !operand )
-			lprintf( 0, "listen mode is 1%sd%sb%s (%s%s%s)",
-				  Options.listen_1varonly ? "+" : "",
-				  Options.listen_deferwrite ? "+" : "",
-				  Options.listen_binary ? "+" : "",
-				  Options.listen_1varonly ? "one var-only, " : "all vars, ",
-				  Options.listen_deferwrite ? "deferred write, " : "continuous write, ",
-				  Options.listen_binary ? "binary" : "ascii");
-		else {
-			char *c;
-			if ( (c = strchr( operand, '1')) ) Options.listen_1varonly   = (*(c+1) != '-');
-			if ( (c = strchr( operand, 'd')) ) Options.listen_deferwrite = (*(c+1) != '-');
-			if ( (c = strchr( operand, 'b')) ) Options.listen_binary     = (*(c+1) != '-');
-		}
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_min]) == 0 ) {
-		if ( !operand ) {
-			lprintf( 0, "integration_dt_min is %g", Options.integration_dt_min);
-			return 0;
-		}
-		if ( expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for value in `integration_dt_min'");
-			return CN_INTERP_PARSEERROR;
-		}
-		Options.integration_dt_min = result;
-		if ( Model )
-			Model->dt_min() = result;
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_max]) == 0 ) {
-		if ( !operand ) {
-			lprintf( 0, "integration_dt_max is %g", Options.integration_dt_max);
-			return 0;
-		}
-		if ( expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for value in `integration_dt_max'");
-			return CN_INTERP_PARSEERROR;
-		}
-		Options.integration_dt_max = result;
-		if ( Model )
-			Model->dt_max() = result;
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_integration_dt_cap]) == 0 ) {
-		if ( !operand ) {
-			lprintf( 0, "integration_dt_cap is %g", Options.integration_dt_max_cap);
-			return 0;
-		}
-		if ( expr( operand, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for value in `integration_dt_cap'");
-			return CN_INTERP_PARSEERROR;
-		}
-		Options.integration_dt_max_cap = result;
-		if ( Model )
-			(static_cast<CIntegrateRK65*>(Model->_integrator)) -> _dt_max_cap = result;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_set_sxf_params]) == 0 ) {
-		if ( !operand ) {
-			lprintf( 0, "sxf_start_delay:sxf_period:sdf_sigma is %g:%g:%g",
-				  Options.sxf_start_delay, Options.sxf_sample, Options.sdf_sigma);
-			return 0;
-		}
-		if ( sscanf( operand, "%g:%g:%g",
-			     &Options.sxf_start_delay, &Options.sxf_sample, &Options.sdf_sigma) < 3 ) {
-			report_script_issue( fname, lineno, -1, "Expecting <double>:<double>:<double> with set_sxf_params");
-			return CN_INTERP_PARSEERROR;
-		}
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_start_log_spikes]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing label in `start_log_spikes'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( Options.sxf_sample <= 0. || Options.sdf_sigma <= 0. ) {
-			report_script_issue( fname, lineno, 1, "SDF parameters not set up, will only log spike times");
-		}
-		list<CModel::STagGroupSpikelogger> specs;
-		specs.push_back( CModel::STagGroupSpikelogger (label_s, true,
-							       Options.sxf_sample, Options.sdf_sigma, Options.sxf_start_delay));
-		Model->process_spikelogger_tags( specs);
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_stop_log_spikes]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing label in `stop_log_spikes'");
-			return CN_INTERP_PARSEERROR;
-		}
-		list<CModel::STagGroupSpikelogger> specs;
-		specs.push_back( CModel::STagGroupSpikelogger (label_s, false));
-		Model->process_spikelogger_tags( specs);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_set_parm_neuron]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s, *parm_s, *value_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ||
-		     !(parm_s = strtok( nullptr, " \t")) ||
-		     !(value_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing label, parameter or value in `set_parm_neuron'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( expr( value_s, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable expression for value in `set_parm_neuron'");
-			return CN_INTERP_PARSEERROR;
-		}
-		list<CModel::STagGroupNeuronParmSet> specs = { CModel::STagGroupNeuronParmSet (label_s, true, parm_s, result) };
-		Model->process_paramset_static_tags( specs);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_set_parm_synapse]) == 0 ) {
-		CHECK_MODEL;
-		char *src_s, *tgt_s, *parm_s, *value_s;
-		if ( !operand ||
-		     !(src_s = (strtok( operand, " \t"))) ||
-		     !(tgt_s = (strtok( nullptr, " \t"))) ||
-		     !(parm_s = strtok( nullptr, " \t")) ||
-		     !(value_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing source or target label, parameter and/or value in `set_parm_synapse'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( expr( value_s, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "Unparsable value in `set_parm_synapse'");
-			return CN_INTERP_PARSEERROR;
-		}
-		list<CModel::STagGroupSynapseParmSet> specs = { CModel::STagGroupSynapseParmSet (src_s, tgt_s, true, parm_s, result) };
-		Model->process_paramset_static_tags( specs);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_new_source]) == 0 ) {
-		CHECK_MODEL;
-		char *type_s, *name_s;
-		if ( !operand ||
-		     !(type_s = (strtok( operand, " \t"))) ||
-		     !(name_s = (strtok( nullptr, " \t"))) ) {
-			report_script_issue( fname, lineno, -1, "Missing source type or name in `new_source'");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		if ( Model->source_by_id( name_s) ) {
-			report_script_issue( fname, lineno, -1, "A source named \"%s\" already exists", name_s);
-			return CN_INTERP_PARSEERROR;
-		}
-
-		char *arg1, *arg2;
-		if ( strcmp( type_s, __SourceTypes[SRC_TAPE]) == 0 ) {
-			if ( !(arg1 = strtok( nullptr, "\n")) ) {
-				report_script_issue( fname, lineno, -1, "Missing filename for a Tape source in `new_source'");
-				return CN_INTERP_PARSEERROR;
-			} else {
-				CSourceTape *source = new CSourceTape( name_s, arg1);
-				if ( source && source->name.size() )
-					if ( count( Model->Sources.begin(), Model->Sources.end(), source) == 0 )
-						Model->Sources.push_back( source);
-					else {
-						report_script_issue( fname, lineno, -1, "Duplicate name (\"%s\") for a source", arg1);
-						return CN_INTERP_SYSFAIL;
-					}
-				else {
-					delete source;
-					report_script_issue( fname, lineno, -1, "Failed to set up a Tape source from \"%s\"", arg1);
-					return CN_INTERP_SYSFAIL;
-				}
-			}
-		} else if ( strcmp( type_s, __SourceTypes[SRC_PERIODIC]) == 0 ) {
-			if ( !(arg1 = strtok( nullptr, "\n")) ) {
-				report_script_issue( fname, lineno, -1, "Missing filename for a Periodic source in `new_source'");
-				return CN_INTERP_PARSEERROR;
-			} else {
-				CSourcePeriodic *source = new CSourcePeriodic( name_s, arg1);
-				if ( source && source->name.size() )
-					if ( count( Model->Sources.begin(), Model->Sources.end(), source) == 0 )
-						Model->Sources.push_back( source);
-					else {
-						report_script_issue( fname, lineno, -1, "Duplicate name (\"%s\") for a source", arg1);
-						return CN_INTERP_SYSFAIL;
-					}
-				else {
-					delete source;
-					report_script_issue( fname, lineno, -1, "Failed to set up a Periodic source from \"%s\"", arg1);
-					return CN_INTERP_SYSFAIL;
-				}
-			}
-		} else if ( strcmp( type_s, __SourceTypes[SRC_NOISE]) == 0 ) {
-			if ( !(arg1 = strtok( nullptr, ":")) ||
-			     !(arg2 = strtok( nullptr, "\n")) ) {
-				report_script_issue( fname, lineno, -1, "Incomplete min:max set for a Noise source in `new_source'");
-				return CN_INTERP_PARSEERROR;
-			} else {
-				double _min, _max;
-				if ( expr( arg1, _min, &varlist) ||
-				     expr( arg2, _max, &varlist) ) {
-					report_script_issue( fname, lineno, -1, "Bad min:max values for a Noise source");
-					return CN_INTERP_PARSEERROR;
-				}
-				CSourceNoise *source = new CSourceNoise( name_s, _min, _max);
-				if ( source && source->name.size() ) {
-					Model->Sources.push_back( source);
-				} else {
-					delete source;
-					report_script_issue( fname, lineno, -1, "Failed to set up a Noise source");
-					return CN_INTERP_SYSFAIL;
-				}
-			}
-		} else if ( strcmp( type_s, __SourceTypes[SRC_FUNCTION]) == 0 ) {
-			report_script_issue( fname, lineno, -1, "Go code, Chris!");
-			return CN_INTERP_SYSFAIL;
-		} else {
-			report_script_issue( fname, lineno, -1, "Unrecognised source type in `new_source'");
-			return CN_INTERP_PARSEERROR;
-		}
-
-		regenerate_source_ids = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_show_sources]) == 0 ) {
-		CHECK_MODEL;
-		for ( list<C_BaseSource*>::iterator S = Model->Sources.begin(); S != Model->Sources.end(); S++ )
-			(*S)->dump();
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_connect_source]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s, *parm_s, *source_s;
-		if ( !operand ||
-		     !(source_s = strtok( operand, " \t")) ||
-		     !(label_s = strtok( nullptr, " \t")) ||
-		     !(parm_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing source id, unit label and/or parameter in `connect_source'");
-			return CN_INTERP_PARSEERROR;
-		}
-		C_BaseSource *source = Model->source_by_id( source_s);
-		if ( !source ) {
-			report_script_issue( fname, lineno, -1, "Unknown source \"%s\"", source_s);
-			return CN_INTERP_PARSEERROR;
-		}
-
-		list<CModel::STagGroupSource> tags;
-		tags.push_back( CModel::STagGroupSource (label_s, true, parm_s, source));
-		Model->process_paramset_source_tags( tags);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_disconnect_source]) == 0 ) {
-		CHECK_MODEL;
-		char *label_s, *parm_s, *source_s;
-		if ( !operand ||
-		     !(label_s = (strtok( operand, " \t"))) ||
-		     !(parm_s = strtok( nullptr, " \t")) ||
-		     !(source_s = strtok( nullptr, "\n")) ) {
-			report_script_issue( fname, lineno, -1, "Missing label, parameter or source in `disconnect_source'");
-			return CN_INTERP_PARSEERROR;
-		}
-		C_BaseSource *source = Model->source_by_id( source_s);
-		if ( !source ) {
-			report_script_issue( fname, lineno, -1, "Unknown source \"%s\"", source_s);
-			return CN_INTERP_PARSEERROR;
-		}
-
-		list<CModel::STagGroupSource> specs;
-		specs.push_back( CModel::STagGroupSource (label_s, false, parm_s, source));
-		Model->process_paramset_source_tags( specs);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_cull_deaf_synapses]) == 0 ) {
-	 	CHECK_MODEL;
-	 	Model->cull_deaf_synapses();
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_describe_model]) == 0 ) {
-		CHECK_MODEL;
-		Model->dump_metrics();
-		Model->dump_units();
-		Model->dump_state();
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_show_units]) == 0 ) {
-		CHECK_MODEL;
-		if ( !operand )
-			operand = const_cast<char*>(".*");
-
-		regex_t RE;
-		if ( 0 != regcomp( &RE, operand, REG_EXTENDED | REG_NOSUB) ) {
-			report_script_issue( fname, lineno, -1, "Invalid regexp for `show_units' arg");
-			return CN_INTERP_PARSEERROR;
-		}
-		size_t cnt = 0;
-		for_model_units (Model,U)
-			if ( regexec( &RE, (*U)->label(), 0, 0, 0) == 0 ) {
-				(*U) -> dump( true);
-				cnt++;
-			}
-		if ( cnt )
-			lprintf( 0, "------------\n%zd units total\n", cnt);
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_exec]) == 0 ) {
-		return interpreter_run( operand, level+1, Options.interp_howstrict,
-					true, true, varlist);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_verbosity]) == 0 ) {
-		if ( !operand )
-			lprintf( 0, "verbosity level is %d", Options.verbosely);
-		else if ( sscanf( operand, "%d", &Options.verbosely) < 1 ) {
-			report_script_issue( fname, lineno, -1, "Bad value for `verbosity'");
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( Model )
-			Model->verbosely = Options.verbosely;
-		__cn_verbosely = Options.verbosely;
-
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_exit]) == 0 ) {
-		delete Model;
-		Model = nullptr;
-		return CN_INTERP_EXIT;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_show_vars]) == 0 ) {
-		if ( !operand )
-			operand = const_cast<char*>(".*");
-		regex_t RE;
-		if ( 0 != regcomp( &RE, operand, REG_EXTENDED | REG_NOSUB) ) {
-			report_script_issue( fname, lineno, -1, "Invalid regexp for `show_vars' arg");
-			return CN_INTERP_PARSEERROR;
-		}
-		size_t	cnt = 0;
-		size_t	longest_id = 0;
-		for ( auto& V : varlist )
-			if ( regexec( &RE, V.name, 0, 0, 0) == 0 )
-				if ( longest_id < strlen( V.name) )
-					longest_id = strlen( V.name);
-		for ( auto& V : varlist )
-			if ( regexec( &RE, V.name, 0, 0, 0) == 0 ) {
-				lprintf( 0, "  %*s = %g", (int)longest_id, V.name, V.value);
-				++cnt;
-			}
-		if ( cnt > 1 )
-			lprintf( 0, "---------- %zu variables\n", cnt);
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_clear_vars]) == 0 ) {
-		if ( !operand )
-			varlist.clear();
-		else {
-			regex_t RE;
-			if ( 0 != regcomp( &RE, operand, REG_EXTENDED | REG_NOSUB) ) {
-				report_script_issue( fname, lineno, -1, "Invalid regexp for `clear_vars' arg");
-				return CN_INTERP_PARSEERROR;
-			}
-			for ( list<SVariable>::iterator V = varlist.begin(); V != varlist.end(); )
-				if ( regexec( &RE, V->name, 0, 0, 0) == 0 )
-					// erase all matching variables, not just the first
-					V = varlist.erase( V);
-				else
-					++V;
-		}
-
-		regenerate_var_names = true;
-
-
-	} else if ( strcmp( cmd, cnrun_cmd[CNCMD_pause]) == 0 ) {
-		if ( operand ) {
-			double s;
-			if ( expr( operand, s, &varlist) )
-				return CN_INTERP_PARSEERROR;
-			if ( s < 0 ) {
-				lprintf( 0, "Can't sleep backwards in time");
-				return CN_INTERP_WARN;
-			}
-			printf( "(Paused for %u sec)", (unsigned int)s); fflush(stdout);
-			sleep( rintf( s));
-			printf( "\n");
-		} else {
-			printf( "Paused: press Enter ...");
-			getchar();
-		}
-
-	} else {  // feed it to exprparser
-		if ( expr( raw, result, &varlist) ) {
-			report_script_issue( fname, lineno, -1, "%s", expr.status_s());
-			return CN_INTERP_PARSEERROR;
-		}
-		if ( expr.toplevel_op != '=' )
-			lprintf( 0, "%g", result);
-
-		regenerate_var_names = true;
-	}
-
-	return 0;
-}
-
-
-} // inline namespace
-
-
-
-
-int
-cnrun::
-interpreter_run( const char *script_fname, int level, int howstrict,
-		 bool env_import, bool env_export, list<SVariable> &varlist)
-{
-	int retval = 0;
-
-	list<SVariable> our_variables;
-	current_shell_variables = &our_variables;
-
-	if ( env_import ) {
-		our_variables.splice( our_variables.begin(), varlist);
-//		printf( "imported %zu vars\n", our_variables.size());
-	}
-
-	if ( script_fname && strlen(script_fname) > 0 ) {
-		ifstream script_stream( script_fname);
-		if ( !script_stream.good() ) {
-			lprintf( -1, "Failed to open script file \"%s\"", script_fname);
-			return -1;
-		}
-		lprintf( 1, "execing %s\n", script_fname);
-
-		unsigned lineno = 0;
-		string buf;
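-		// For each input line: strip `#' comments, trim trailing
-		// whitespace, then split the remainder on `;' and execute
-		// each statement in turn, stopping per the howstrict policy.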
-		while ( getline( script_stream, buf) || script_stream.gcount() ) {
-			lineno++;
-			if ( buf.size() ) {
-
-				char	*pp = strchr( (char*)buf.c_str(), '#');
-				if ( pp )
-					buf.resize( pp - buf.c_str());
-				size_t	buflen = buf.size();
-				while ( buflen && isspace(buf[buflen-1]) )
-					buf[--buflen] = '\0';
-
-				char	*_stmt = &buf[0],
-					*stmt;
-				while ( _stmt - &buf[0] < (int)buflen && (stmt = strtok( _stmt, ";\n")) ) {
-					_stmt = _stmt + strlen(_stmt)+1;
-
-					retval = do_single_cmd( stmt, our_variables, level, script_fname, lineno);
-					if ( retval == CN_INTERP_EXIT ||
-					     (retval < CN_INTERP_WARN && howstrict == CN_INTRP_STRICT) )
-						break;
-				}
-			}
-		}
-	} else {
-		if ( level == 0 ) {
-			using_history();
-			read_history( CNRUN_HISTORY);
-			rl_attempted_completion_function = cnrun_completion;
-			rl_bind_key( '\t', rl_complete);
-		}
-		HISTORY_STATE *the_history_state = history_get_history_state();
-		if ( the_history_state && the_history_state->length == 0 )
-			printf( "Hit TAB for context completions\n");
-
-		char *buf, prompt[256];
-		while ( snprintf( prompt, 255, "%s @%g%.*s ",
-				  Model ? Model->name.c_str() : "-",
-				  Model ? Model->model_time() : 0,
-				  level+1, "]]]]]]]"),
-			(buf = readline( prompt)) ) {
-
-			the_history_state = history_get_history_state();
-			if ( the_history_state &&
-			     (the_history_state->length < 2 ||
-			      (the_history_state->length > 1 &&
-			       *buf &&
-			       strcmp( history_get( the_history_state->length)->line, buf) != 0) ) ) {
-				add_history( buf);
-			}
-
-			char	*pp = strchr( buf, '#');
-			if ( pp )
-				*pp = '\0';
-			size_t	buflen = strlen( buf);
-			while ( buflen && strchr( " \t", buf[buflen-1]) )
-				buf[--buflen] = '\0';
-			if ( !buflen )
-				continue;
-
-			char	*_stmt = buf,  // will hold the pointer to the next statement
-				*stmt;
-			while ( _stmt - buf < (int)buflen  &&  (stmt = strtok( _stmt, ";\n")) ) {
-				_stmt += (strlen(_stmt) + 1);
-
-				retval = do_single_cmd( stmt, our_variables, level, nullptr, -1);
-				if ( retval == CN_INTERP_EXIT ) {
-					free( buf);
-					goto out;
-				}
-			}
-			free( buf);
-			if ( level == 0 ) {
-//				rl_attempted_completion_function = cnrun_completion;
-				rl_bind_key( '\t', rl_complete);
-			}
-		}
-	out:
-		if ( level == 0 )
-			write_history( CNRUN_HISTORY);
-		printf( "\n");
-	}
-
-	if ( env_export ) {
-	      // update varlist
-		varlist.merge( our_variables);
-		varlist.sort();
-		varlist.unique();
-	}
-	current_shell_variables = &varlist;
-
-	return retval;
-}
-
diff --git a/upstream/src/cnrun/main.cc b/upstream/src/cnrun/main.cc
deleted file mode 100644
index 1d2dd2c..0000000
--- a/upstream/src/cnrun/main.cc
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-02
- *
- * CNModel runner (main, cmdline parser)
- */
-
-
-#include <cstdarg>
-#include <unistd.h>
-#include <list>
-
-#include "libstilton/exprparser.hh"
-#include "runner.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-using namespace cnrun;
-
-
-void
-cnrun::
-lprintf( int level, const char* fmt, ...)
-{
-	if ( level <= Options.verbosely ) {  // print messages at or below the current verbosity
-		va_list ap;
-		va_start (ap, fmt);
-		vprintf( fmt, ap);
-		va_end (ap);
-	}
-}
-
-
-CModel *cnrun::Model;
-
-
-SCNRunnerOptions cnrun::Options;
-const char *ScriptFileName = ""; // CNRUN_DEFAULT_SCRIPT;
-
-
-
-#define CNRUN_CLPARSE_HELP_REQUEST	-1
-#define CNRUN_CLPARSE_ERROR		-2
-#define CNRUN_DUMP_PARAMS		-3
-
-namespace {
-static int parse_options( int argc, char **argv, list<SVariable>&);
-static void usage( const char *argv0);
-}
-
-#define CNRUN_EARGS		-1
-#define CNRUN_ESETUP		-2
-#define CNRUN_ETRIALFAIL	-3
-
-
-
-void print_version( const char* appname);
-
-int
-main( int argc, char *argv[])
-{
-	print_version( "cnrun");
-
-	int retval = 0;
-
-	list<SVariable> Variables;
-	switch ( parse_options( argc, argv, Variables) ) {
-	case CNRUN_CLPARSE_ERROR:
-		fprintf( stderr, "Problem parsing command line or sanitising values; try -h for help\n");
-		return CNRUN_EARGS;
-	case CNRUN_CLPARSE_HELP_REQUEST:
-		usage( argv[0]);
-		return 0;
-	}
-
-      // purely informational, requires no model
-	if ( Options.list_units ) {
-		cnmodel_dump_available_units();
-		return 0;
-	}
-
-      // cd as requested
-	char *pwd = nullptr;
-	if ( Options.working_dir ) {
-		pwd = getcwd( nullptr, 0);
-		if ( chdir( Options.working_dir) ) {
-			fprintf( stderr, "Failed to cd to \"%s\"", Options.working_dir);
-			return CNRUN_EARGS;
-		}
-	}
-
-	__cn_verbosely = Options.verbosely;
-	__cn_default_unit_precision = Options.precision;
-
-	interpreter_run( ScriptFileName, 0, Options.interp_howstrict,
-			 true, true, Variables);
-
-	delete Model;
-
-	if ( pwd ) {
-		if ( chdir( pwd) )  // best-effort return to the original cwd
-			;
-		free( pwd);  // getcwd( nullptr, 0) allocates
-	}
-
-	return retval;
-}
-
-
-
-
-namespace {
-int
-parse_options( int argc, char **argv, list<SVariable>& Variables)
-{
-	int	c;
-
-	while ( (c = getopt( argc, argv, "e:t::L:E:g:k:UsC:n:D:v:h")) != -1 )
-		switch ( c ) {
-
-		case 'e':	ScriptFileName = optarg;					break;
-
-		case 't':	switch ( optarg[0] ) {
-				case 'T':	if ( sscanf( optarg+1, "%lg", &Options.integration_dt_max) != 1 ) {
-							fprintf( stderr, "-tT takes a double");
-							return CNRUN_CLPARSE_ERROR;
-						}						break;
-				case 't':	if ( sscanf( optarg+1, "%lg", &Options.integration_dt_min) != 1 ) {
-							fprintf( stderr, "-tt takes a double");
-							return CNRUN_CLPARSE_ERROR;
-						}						break;
-				case 'x':	if ( sscanf( optarg+1, "%lg", &Options.integration_dt_max_cap) != 1 ) {
-							fprintf( stderr, "-tx takes a double");
-							return CNRUN_CLPARSE_ERROR;
-						}						break;
-				default:	fprintf( stderr, "Unrecognised option modifier for -t");
-							return CNRUN_CLPARSE_ERROR;
-				}								break;
-
-		case 'L':	if ( strchr( optarg, 'd') != nullptr )
-					Options.listen_deferwrite = true;
-				if ( strchr( optarg, '1') != nullptr )
-					Options.listen_1varonly = true;
-				if ( strchr( optarg, 'b') != nullptr )
-					Options.listen_binary = true;
-				if ( strchr( optarg, 'L') != nullptr )
-					Options.log_dt = true;				break;
-
-		case 'E':	if ( sscanf( optarg, "%g", &Options.listen_dt) != 1 ) {
-					fprintf( stderr, "-E takes a double");
-					return CNRUN_CLPARSE_ERROR;
-				}						break;
-		case 'g':	if ( sscanf( optarg, "%u", &Options.precision) != 1 ) {
-					fprintf( stderr, "-g takes an unsigned int");
-					return CNRUN_CLPARSE_ERROR;
-				}						break;
-
-		case 'n':	if ( optarg && *optarg == 'c' )
-					Options.dont_coalesce = true;		break;
-
-		case 'k':	Options.log_spikers = true;
-				switch ( *optarg ) {
-				case '0':	Options.log_spikers_use_serial_id = true;	break;
-				case 'l':	Options.log_spikers_use_serial_id = false;	break;
-				case 'S':	if ( sscanf( optarg+1, "%g", &Options.spike_threshold) != 1 ) {
-							fprintf( stderr, "-kS takes a double");
-							return CNRUN_CLPARSE_ERROR;
-						}						break;	// was missing: fell through into the error branch
-				default:	fprintf( stderr, "Expecting 0, l, or S<double> after -k");
-						return CNRUN_CLPARSE_ERROR;
-				}						break;
-
-		case 'U':	Options.list_units = true;			break;
-
-
-		case 's':	Options.sort_units = true;			break;
-
-
-		case 'C':	Options.working_dir = optarg;			break;
-
-		case 'D':	{
-					double	unused;
-					CExpression expr;
-					if ( expr( optarg, unused, &Variables) ) {
-						fprintf( stderr, "Malformed variable assignment with -D");
-						return CNRUN_CLPARSE_ERROR;
-					}
-				}						break;	// was missing: fell through into the -v handler
-
-		case 'v':	Options.verbosely = strtol( optarg, nullptr, 10);
-				{	char *p;
-					if ( (p = strchr( optarg, '%')) )
-						Options.display_progress_percent = (*(p+1) != '-');
-					if ( (p = strchr( optarg, 't')) )
-						Options.display_progress_time    = (*(p+1) != '-');
-				}
-										break;
-		case 'h':
-			return CNRUN_CLPARSE_HELP_REQUEST;
-		case '?':
-		default:
-			return CNRUN_CLPARSE_ERROR;
-		}
-
-	return 0;
-}
-
-
-
-
-void
-usage( const char *argv0)
-{
-	cout << "Usage: " << argv0 << "\n" <<
-		" -e <script_fname>\tExecute script\n"
-		" -C <dir>\t\tWork in dir\n"
-		" -s \t\t\tSort units\n"
-		"\n"
-		" -L[d1Lb] \t\tLogging & listeners:\n"
-		"    d \t\t\tdefer writing to disk until done rather than writing continuously\n"
-		"    1\t\t\tonly log the first variable\n"
-		"    b\t\t\twrite in native binary form rather than in ASCII\n"
-		"    L\t\t\tlog integrator dt\n"
-		" -E<double>\t\tListen at this interval (default " << Options.listen_dt << ";\n"
-		"\t\t\t   set to 0 to listen every cycle)\n"
-		"\n"
-		" -kl \t\t\tWrite a model-wide log of spiking neurons, using labels\n"
-		" -k0 \t\t\t... use unit id instead\n"
-		" -kS<double>\t\tSpike detection threshold (default " << Options.spike_threshold << ")\n"
-		"\n"
-		" -g <uint>\t\tSet precision for all output (default " << Options.precision << ")\n"
-		"\n"
-		" -tT <double>\t\tdt_max (default " << Options.integration_dt_max << ")\n"
-		" -tt <double>\t\tdt_min (default " << Options.integration_dt_min << ")\n"
-		" -tx <double>\t\tCap dt by current dt value x this (default " << Options.integration_dt_max_cap << ")\n"
-		"\n"
-		" -D EXPR\t\tAny valid expression, will inject whatever variables get assigned in it\n"
-		"\n"
-		" -v <int>\t\tSet verbosity level (default " << Options.verbosely << ")\n"
-		"\t\t\t Use a negative value to show the progress percentage only.\n"
-		" -v[%[-]t[-]]\t\tDisplay (with -, suppress) progress percent and/or time,\n"
-		"\t\t\t indented on the line at 8 x (minus) this value.\n"
-		"\n"
-		" -U \t\t\tList available unit types with parameter names\n"
-		"\t\t\t  and values, and exit\n"
-		" -h \t\t\tDisplay this help\n"
-		"\n";
-}
-} // namespace
-
-
-// EOF
diff --git a/upstream/src/cnrun/runner.hh b/upstream/src/cnrun/runner.hh
deleted file mode 100644
index a98488b..0000000
--- a/upstream/src/cnrun/runner.hh
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-11-04
- *
- * CNModel runner
- */
-
-#ifndef CN_RUNNER_H
-#define CN_RUNNER_H
-
-#include <vector>
-#include <list>
-#include <iostream>
-
-#include "libstilton/exprparser.hh"
-
-#include "libcn/model.hh"
-#include "libcn/sources.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-namespace cnrun {
-
-enum {
-	CN_INTRP_STRICT,
-	CN_INTRP_LOOSE
-};
-
-struct SCNRunnerOptions {
-	bool	listen_1varonly:1,
-		listen_deferwrite:1,
-		listen_binary:1,
-		dump_params:1,
-		list_units:1,
-		sort_units:1,
-		log_dt:1,
-		log_spikers:1,
-		log_spikers_use_serial_id:1,
-		log_sdf:1,
-		display_progress_percent:1,
-		display_progress_time:1,
-		dont_coalesce:1;
-	const char *working_dir;
-	unsigned precision;
-	float	spike_threshold,
-		spike_lapse,
-		listen_dt;
-	double	//discrete_dt,
-		integration_dt_max,
-		integration_dt_min,
-		integration_dt_max_cap;
-	float	sxf_start_delay,
-		sxf_sample,
-		sdf_sigma;
-	int	interp_howstrict,
-		verbosely;
-
-	SCNRunnerOptions()
-	      : listen_1varonly (true), listen_deferwrite (false), listen_binary (false),
-		dump_params(false), list_units (false),
-		sort_units (false),
-		log_dt (false),
-		log_spikers (false), log_spikers_use_serial_id (false),
-		log_sdf (false),
-		display_progress_percent (true),
-		display_progress_time (false),
-		dont_coalesce (false),
-		working_dir ("."),
-		precision (8),
-		spike_threshold (0.), spike_lapse (3.),
-		listen_dt(1.),
-		//discrete_dt(.5),
-		integration_dt_max (.5), integration_dt_min (1e-5), integration_dt_max_cap (5.),
-		sxf_start_delay (0.), sxf_sample (0.), sdf_sigma (0.),
-		interp_howstrict (CN_INTRP_LOOSE),
-		verbosely (1)
-		{}
-};
-
-#define CNRUN_DEFAULT_SCRIPT "./Default.cnsh"
-
-extern SCNRunnerOptions Options;
-
-
-
-extern CModel *Model;
-
-int interpreter_run( const char *script_fname, int level, int howstrict,
-		     bool env_import, bool env_export, list<cnrun::SVariable> &varlist);
-
-
-enum {
-	CNCMD__noop = -1,
-	CNCMD_new_model = 0,
-	CNCMD_load_nml,
-	CNCMD_merge_nml,
-	CNCMD_add_neuron,
-	CNCMD_add_synapse,
-	CNCMD_reset,
-	CNCMD_reset_revert_params,
-	CNCMD_reset_state_units,
-	CNCMD_advance_until,
-	CNCMD_advance,
-	CNCMD_putout,
-	CNCMD_decimate,
-	CNCMD_start_listen,
-	CNCMD_stop_listen,
-	CNCMD_listen_dt,
-	CNCMD_listen_mode,
-	CNCMD_integration_dt_min,
-	CNCMD_integration_dt_max,
-	CNCMD_integration_dt_cap,
-	CNCMD_start_log_spikes,
-	CNCMD_stop_log_spikes,
-	CNCMD_set_sxf_params,
-	CNCMD_new_source,
-	CNCMD_show_sources,
-	CNCMD_connect_source,
-	CNCMD_disconnect_source,
-	CNCMD_set_parm_neuron,
-	CNCMD_set_parm_synapse,
-	CNCMD_cull_deaf_synapses,
-	CNCMD_describe_model,
-	CNCMD_show_units,
-	CNCMD_exec,
-	CNCMD_verbosity,
-	CNCMD_show_vars,
-	CNCMD_clear_vars,
-	CNCMD_pause,
-	CNCMD_exit
-};
-
-extern const char* const cnrun_cmd[];
-
-extern bool regenerate_unit_labels;
-extern bool regenerate_var_names;
-extern bool regenerate_source_ids;
-
-char** cnrun_completion( const char *text, int start, int end);
-
-
-extern list<cnrun::SVariable> *current_shell_variables;
-
-
-void lprintf( int level, const char* fmt, ...) __attribute__ ((format (printf, 2, 3)));
-
-
-}  // namespace cnrun
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcn/Makefile.am b/upstream/src/libcn/Makefile.am
deleted file mode 100644
index 7679188..0000000
--- a/upstream/src/libcn/Makefile.am
+++ /dev/null
@@ -1,45 +0,0 @@
-include $(top_srcdir)/src/Common.mk
-
-pkglib_LTLIBRARIES = libcn.la
-
-libcn_la_SOURCES = \
-	sources.cc \
-	types.cc \
-	base-unit.cc \
-	standalone-neurons.cc \
-	standalone-synapses.cc \
-	hosted-neurons.cc \
-	hosted-synapses.cc \
-	model-struct.cc \
-	model-cycle.cc \
-	model-nmlio.cc \
-	sources.hh \
-	types.hh \
-	param-unit-literals.hh \
-	mx-attr.hh \
-	base-unit.hh	standalone-attr.hh    	hosted-attr.hh \
-	base-synapse.hh	standalone-neurons.hh 	hosted-neurons.hh  \
-	base-neuron.hh	standalone-synapses.hh	hosted-synapses.hh \
-	model.hh \
-	integrate-base.hh integrate-rk65.hh
-
-libcn_la_LDFLAGS = \
-	-avoid-version \
-	-rpath $(libdir)/$(PACKAGE) \
-	-shared -module
-
-
-if DO_PCH
-BUILT_SOURCES = \
-	sources.hh.gch \
-	types.hh.gch \
-	param-unit-literals.hh.gch \
-	mx-attr.hh.gch \
-	base-unit.hh.gch	standalone-attr.hh.gch    	hosted-attr.hh.gch \
-	base-synapse.hh.gch	standalone-neurons.hh.gch 	hosted-neurons.hh.gch  \
-	base-neuron.hh.gch	standalone-synapses.hh.gch	hosted-synapses.hh.gch \
-	model.hh.gch \
-	integrate-base.hh.gch integrate-rk65.hh.gch
-
-CLEANFILES = $(BUILT_SOURCES)
-endif
diff --git a/upstream/src/libcn/base-neuron.hh b/upstream/src/libcn/base-neuron.hh
deleted file mode 100644
index 47a6ee0..0000000
--- a/upstream/src/libcn/base-neuron.hh
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-03-31
- *
- */
-
-
-#ifndef LIBCN_BASE_NEURON_H
-#define LIBCN_BASE_NEURON_H
-
-#include <list>
-#include <cstring>
-#include <cmath>
-#include <map>
-#include <numeric>
-
-#include "base-unit.hh"
-#include "base-synapse.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-
-using namespace std;
-
-namespace cnrun {
-
-#define CN_MIN_DISTANCE .1
-
-
-
-
-class CModel;
-struct SSpikeloggerService;
-
-
-typedef map<C_BaseSynapse*, double> SCleft;
-inline double operator+ ( double a, const pair<C_BaseSynapse*, double>& b) { return a + b.second; }
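-// (the operator+ above lets std::accumulate fold SCleft entries by conductance)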
-
-class C_BaseNeuron
-  : public C_BaseUnit {
-
-	struct SCoord {
-		double x, y, z;
-
-		SCoord( const double& inx, const double& iny, const double& inz)
-		      : x (inx), y (iny), z (inz)
-			{}
-
-	      // distance
-		double operator - ( const SCoord &p) const
-			{
-				return sqrt( pow(x - p.x, 2) + pow(y - p.y, 2) + pow(z - p.z, 2));
-			}
-
-		bool operator == ( const SCoord &p) const
-			{
-				return x == p.x && y == p.y && z == p.z;
-			}
-		bool operator != ( const SCoord &p) const
-			{
-				return x != p.x || y != p.y || z != p.z;
-			}
-		bool too_close( const SCoord& p, double mindist = CN_MIN_DISTANCE) const
-			{
-				return	fabs(x - p.x) < mindist &&
-					fabs(y - p.y) < mindist &&
-					fabs(z - p.z) < mindist;
-			}
-	};
-
-    friend class CModel;
-    friend class C_BaseSynapse;
-
-    protected:
-	C_BaseNeuron();
-
-	SCleft	_dendrites;
-	list<C_BaseSynapse*>
-		_axonal_harbour;
-    public:
-	SCoord	pos;
-
-	size_t axonal_conns() const	{ return _axonal_harbour.size(); }
-	size_t dendrites() const	{ return _dendrites.size(); }
-
-	bool connects_to( const C_BaseNeuron &to) const __attribute__ ((pure));
-	C_BaseSynapse *connects_via( C_BaseNeuron &to,
-				     SCleft::mapped_type *g_ptr = nullptr) const;
-
-    protected:
-	C_BaseNeuron( TUnitType intype, const char *inlabel,
-		      double inx, double iny, double inz,
-		      CModel* inM, int s_mask = 0)
-	      : C_BaseUnit (intype, inlabel, inM, s_mask),
-		pos (inx, iny, inz),
-		_spikelogger_agent (nullptr)
-		{}
-
-	virtual ~C_BaseNeuron();
-
-    public:
-	void reset_state();
-
-      // Even though E is not meaningful for rate-based neurons,
-      // leave these here to make the method available to synapses wanting _target->E()
-	virtual double E() const
-		{  return __cn_dummy_double;  }
-	virtual double E( vector<double>&) const
-		{  return __cn_dummy_double;  }
-      // likewise, for those needing _source->F
-	virtual double F() const
-		{  return __cn_dummy_double;  }
-	virtual double F( vector<double>&) const
-		{  return __cn_dummy_double;  }
-
-	// struct __SCleft_second_plus {
-	// 	double operator() ( double a, const SCleft::value_type &i) { return a + i.second; }
-	// };
-	double Isyn() const  // is the sum of Isyn() on all dendrites
-		{
-			double I = 0.;
-			for ( auto &Y : _dendrites )
-				I += Y.first->Isyn(*this, Y.second);
-			return I;
-		}
-
-	double Isyn( vector<double> &x) const  // an honourable mention
-		{
-			double I = 0.;
-			for ( auto &Y : _dendrites )
-				I += Y.first->Isyn(x, *this, Y.second);
-			return I;
-		}
-
-	virtual void possibly_fire()
-		{}
-
-      // Even though rate-based neurons do not track individual spikes,
-      // we can estimate the probability of such a neuron spiking as
-      // F*dt*rand(), which makes this method a valid one.
-
-      // Note this assumes P[0] is F for all rate-based neurons, and E
-      // for conductance-based ones, which by now is hard-coded for all neurons.
-	virtual unsigned n_spikes_in_last_dt() const
-		{  return 0;  }
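-      // An illustrative sketch (not in the original sources): a rate-based
-      // descendant could implement the above estimate as a Bernoulli draw,
-      //     unsigned n_spikes_in_last_dt() const
-      //         { return drand48() < F() * dt; }
-      // with `dt' standing for the last integration step; the name is
-      // assumed here, for illustration only.
-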
-	virtual void do_detect_spike_or_whatever()
-		{}
-
-    protected:
-	SSpikeloggerService *_spikelogger_agent;
-
-    public:
-	SSpikeloggerService* spikelogger_agent()  { return _spikelogger_agent;  }
-	SSpikeloggerService* enable_spikelogging_service( int s_mask = 0);
-	SSpikeloggerService* enable_spikelogging_service( double sample_period, double sigma, double from = 0.,
-							  int s_mask = 0);
-	void disable_spikelogging_service();
-	void sync_spikelogging_history();
-
-	double distance_to( C_BaseNeuron*) const; // will do on demand
-
-	void dump( bool with_params = false, FILE *strm = stdout) const;
-};
-
-
-
-
-
-#define CN_KL_COMPUTESDF	(1 << 0)
-#define CN_KL_ISSPIKINGNOW	(1 << 1)
-#define CN_KL_PERSIST		(1 << 2)  // should not be deleted at disable_spikelogging_service
-#define CN_KL_IDLE		(1 << 3)  // should not be placed on spikelogging_neu_list on enable_spikelogging_service
-
-
-struct SSpikeloggerService {
-
-	friend class C_BaseNeuron;
-	friend class C_HostedConductanceBasedNeuron;  // accesses _status from do_spikelogging_or_whatever
-	friend class COscillatorDotPoisson;  // same
-	friend class COscillatorPoisson;  // same
-	friend class CModel;  // checks CN_KL_IDLE in include_unit
-    private:
-	SSpikeloggerService();
-
-	int _status;
-
-    public:
-	SSpikeloggerService( C_BaseNeuron *client,
-			     int s_mask = 0)
-	      : _status (s_mask & ~CN_KL_COMPUTESDF),
-		_client (client),
-		t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
-		sample_period (42), sigma (42), start_delay (0.)
-		{}
-	SSpikeloggerService( C_BaseNeuron *client,
-			     double insample_period, double insigma, double instart_delay = 0.,
-			     int s_mask = 0)
-	      : _status (s_mask | CN_KL_COMPUTESDF),
-		_client (client),
-		t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
-		sample_period (insample_period), sigma (insigma), start_delay (instart_delay)
-		{}
-
-	C_BaseNeuron *_client;
-
-	double	t_last_spike_start,
-		t_last_spike_end;
-
-	size_t n_spikes_since( double since = 0.) const __attribute__ ((pure));
-
-	double	sample_period,
-		sigma,
-		start_delay;
-
-//	void spike_detect();  // multiplexing units will have a different version
-	// replaced by do_spikelogging_or_whatever on the client side
-
-	vector<double> spike_history;
-
-	void reset()
-		{
-			_status &= ~CN_KL_ISSPIKINGNOW;
-			t_last_spike_start = t_last_spike_end
-				/*= t_firing_started = t_firing_ended */ = -INFINITY;
-			spike_history.clear();
-		}
-
-    protected:
-	void sync_history();
-
-    public:
-      // spike density function
-	double sdf( double at, double sample_width, double sigma, unsigned* nspikes = nullptr) const;
-      // spike homogeneity function
-	double shf( double at, double sample_width) const;
-
-      // why not allow custom sampling?
-	size_t get_sxf_vector_custom( vector<double> *sdf_buf, vector<double> *shf_buf, vector<unsigned> *nsp_buf,
-			       double sample_period_custom, double sigma_custom,
-			       double from = 0., double to = 0.) const; // "to == 0." for model_time()
-	size_t get_sxf_vector( vector<double> *sdf_buf, vector<double> *shf_buf, vector<unsigned> *nsp_buf,
-			       double from = 0., double to = 0.) const
-		{
-			return get_sxf_vector_custom( sdf_buf, shf_buf, nsp_buf,
-						      sample_period, sigma,
-						      from, to);
-		}
-};
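-// Usage sketch (hypothetical call site): enable logging with SDF
-// computation on a neuron n,
-//     n->enable_spikelogging_service( /*sample_period*/ 5., /*sigma*/ 2.);
-// after the run, sync_spikelogging_history() writes <label>.spikes and,
-// if CN_KL_COMPUTESDF is set, <label>.sxf as well.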
-
-
-
-
-inline void
-C_BaseNeuron::reset_state()
-{
-	C_BaseUnit::reset_state();
-	if ( _spikelogger_agent )
-		_spikelogger_agent->reset();
-}
-
-
-
-inline void
-C_BaseNeuron::sync_spikelogging_history()
-{
-	if ( _spikelogger_agent )
-		_spikelogger_agent->sync_history();
-}
-
-
-
-inline double
-C_BaseSynapse::g_on_target( const C_BaseNeuron &which) const
-{
-	return (find( _targets.begin(), _targets.end(), &which) != _targets.end())
-		? which._dendrites.at(const_cast<C_BaseSynapse*>(this)) : __cn_dummy_double;
-}
-inline void
-C_BaseSynapse::set_g_on_target( C_BaseNeuron &which, double g)
-{
-	if ( find( _targets.begin(), _targets.end(), &which) != _targets.end() )
-		which._dendrites[this] = g;
-}
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/base-synapse.hh b/upstream/src/libcn/base-synapse.hh
deleted file mode 100644
index 286b5b1..0000000
--- a/upstream/src/libcn/base-synapse.hh
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-03-31
- *
- * Synapse units: alpha-beta
- */
-
-
-#ifndef LIBCN_BASE_SYNAPSE_H
-#define LIBCN_BASE_SYNAPSE_H
-
-#include <cmath>
-#include <vector>
-#include <list>
-#include <map>
-#include <algorithm>
-
-#include "base-unit.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-
-namespace cnrun {
-
-class C_BaseSynapse;
-class C_BaseNeuron;
-class CModel;
-
-
-typedef map<C_BaseSynapse*, double> SCleft;
-
-class C_BaseSynapse
-  : public C_BaseUnit {
-
-    friend class CModel;
-    friend class C_BaseNeuron;
-
-    protected:
-	C_BaseSynapse(); // not constructible without parameters
-
-	C_BaseNeuron	*_source;
-	list<C_BaseNeuron*>
-			_targets;
-	typedef list<C_BaseNeuron*>::iterator lni;
-	typedef list<C_BaseNeuron*>::reverse_iterator lnri;
-	typedef list<C_BaseNeuron*>::const_iterator lnci;
-	bool has_target( const C_BaseNeuron *tgt) __attribute__ ((pure))
-		{
-			return find( _targets.begin(), _targets.end(), tgt) != _targets.end();
-		}
-
-	double t_last_release_started;
-
-    public:
-	C_BaseNeuron
-		*source()	{  return _source;  }
-
-	double g_on_target( const C_BaseNeuron &which) const;
-	void set_g_on_target( C_BaseNeuron &which, double g);
-
-	C_BaseSynapse *clone_to_target( C_BaseNeuron *nt, double g);
-	C_BaseSynapse *make_clone_independent( C_BaseNeuron *target);
-
-    protected:
-	C_BaseSynapse( TUnitType intype,
-		       C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		       double ing, CModel *inM, int s_mask = 0);
-	virtual ~C_BaseSynapse();
-
-    public:
-	void reset_state()
-		{
-			C_BaseUnit::reset_state();
-			t_last_release_started = -INFINITY;
-		}
-
-	virtual double Isyn( const C_BaseNeuron &with_neuron, double g) const = 0;
-	virtual double Isyn( vector<double> &base, const C_BaseNeuron &with_neuron, double g) const = 0;
-	// no gsyn known to the synapse: now C_BaseNeuron::SCleft knows it
-
-	void dump( bool with_params = false, FILE *strm = stdout) const;
-
-    private:
-	virtual void update_queue()
-		{}
-};
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/base-unit.cc b/upstream/src/libcn/base-unit.cc
deleted file mode 100644
index 56ec66d..0000000
--- a/upstream/src/libcn/base-unit.cc
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- *
- */
-
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <iostream>
-#include <limits>
-#include <algorithm>
-
-#include <gsl/gsl_statistics_double.h>
-
-#include "base-unit.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-
-
-double cnrun::__cn_dummy_double;
-
-unsigned short cnrun::__cn_default_unit_precision = 8;
-
-int cnrun::__cn_verbosely = 2;
-
-
-
-// mind that child
-
-cnrun::C_BaseUnit::
-C_BaseUnit( TUnitType intype, const char *inlabel,
-	    CModel* inM, int s_mask)
-      : _type (intype), _status (0 |/* CN_UENABLED |*/ s_mask),
-	M (inM),
-	_binwrite_handle (-1), _listener_disk (nullptr), _listener_mem (nullptr),
-	precision (__cn_default_unit_precision)
-{
-	memset( _label, 0, CN_MAX_LABEL_SIZE);
-	if ( inlabel ) {
-		strncpy( _label, inlabel, CN_MAX_LABEL_SIZE-1);  // leave the final NUL from the memset in place
-		if ( inM && inM->unit_by_label( _label) ) {
-			fprintf( stderr, "Model %s already has a unit labelled \"%s\"\n", inM->name.c_str(), _label);
-			_status |= CN_UERROR;
-		}
-	} else
-		snprintf( _label, CN_MAX_LABEL_SIZE-1, "fafa%p", this);
-
-	reset_params();
-	// don't have field idx to do reset_vars() safely
-}
-
-
-
-void
-cnrun::C_BaseUnit::
-reset_state()
-{
-	if ( M && M->verbosely > 3 )
-		fprintf( stderr, "Resetting \"%s\"\n", _label);
-	reset_vars();
-	if ( is_listening() )
-		restart_listening();
-}
-
-
-int
-cnrun::C_BaseUnit::
-param_idx_by_sym( const char *sym) const
-{
-	for ( int i = 0; i < p_no(); ++i )
-		if ( strcmp( sym, __CNUDT[_type].stock_param_syms[i]) == 0 )
-			return i;
-	return -1;
-}
-
-int
-cnrun::C_BaseUnit::
-var_idx_by_sym( const char *sym) const
-{
-	for ( unsigned short i = 0; i < v_no(); ++i )
-		if ( strcmp( sym, __CNUDT[_type].stock_var_syms[i]) == 0 )
-			return i;
-	return -1;
-}
-
-
-
-
-
-
-
-
-void
-cnrun::C_BaseUnit::
-start_listening( int mask)
-{
-	if ( !M ) {
-		fprintf( stderr, "start_listening() called for an unattached unit \"%s\"\n", _label);
-		return;
-	}
-	if ( _listener_disk || _listener_mem || _binwrite_handle != -1 ) { // listening already; check if user wants us to listen differently
-		if ( (_status | (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE)))
-		     != mask ) {
-			stop_listening();  // this will nullptrify _listener_{mem,disk}, avoiding recursion
-			start_listening( mask);
-			if ( M->verbosely > 4 )
-				fprintf( stderr, "Unit \"%s\" was already listening\n", _label);
-			return;
-		}
-	}
-
-      // deferred write implies a mem listener
-	if ( mask & CN_ULISTENING_DEFERWRITE && !(mask & CN_ULISTENING_MEM) )
-		mask |= CN_ULISTENING_MEM;
-
-	if ( mask & CN_ULISTENING_MEM )
-		_listener_mem = new vector<double>;
-
-	if ( mask & CN_ULISTENING_DISK ) {
-		if ( M->_status & CN_MDL_DISKLESS )
-			fprintf( stderr, "Cannot get Unit \"%s\" to listen to disk in a diskless model\n", _label);
-		else {
-			_listener_disk = new ofstream( (string(_label)+".var").c_str(), ios_base::trunc);
-			_listener_disk->precision( precision);
-
-			*_listener_disk << "# " << _label << " variables\n#<time>";
-			if ( mask & CN_ULISTENING_1VARONLY )
-				*_listener_disk << "\t<" << var_sym(0) << ">";
-			else
-				for ( unsigned short v = 0; v < v_no(); ++v )
-					*_listener_disk << "\t<" << var_sym(v) << ">";
-			*_listener_disk << endl;
-			if ( M->verbosely > 4 )
-				fprintf( stderr, "Unit \"%s\" now listening\n", _label);
-		}
-	}
-
-	if ( mask & CN_ULISTENING_BINARY )
-		_binwrite_handle = open( (string(_label)+".varx").c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR);
-
-	_status |= (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY |
-			    CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE));
-
-      // inform the model
-	M->register_listener( this);
-}
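-
-// Usage sketch (assumed call site, not from this file):
-//     u->start_listening( CN_ULISTENING_DISK | CN_ULISTENING_DEFERWRITE);
-// CN_ULISTENING_DEFERWRITE implies CN_ULISTENING_MEM (see above), so
-// samples accumulate in memory and are flushed by stop_listening().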
-
-
-void
-cnrun::C_BaseUnit::
-stop_listening()
-{
-      // do deferred write
-	if ( _status & CN_ULISTENING_DEFERWRITE && _listener_mem ) {
-		if ( _listener_disk ) {
-			for ( auto mI = _listener_mem->begin(); mI != _listener_mem->end(); ) {
-				*_listener_disk << *(mI++);
-				if ( _status & CN_ULISTENING_1VARONLY )
-					*_listener_disk << "\t" << *(mI++);
-				else
-					for ( size_t v = 0; v < v_no(); ++v )
-						*_listener_disk << "\t" << *(mI++);
-				*_listener_disk << endl;
-			}
-		}
-		if ( _binwrite_handle != -1 )
-			if ( write( _binwrite_handle, _listener_mem->data(),
-				    sizeof(double) * _listener_mem->size()) < 1 )
-				fprintf( stderr, "write() failed on \"%s.varx\"\n", _label);
-	}
-
-	if ( _listener_mem ) {
-		delete _listener_mem;
-		_listener_mem = nullptr;
-	}
-
-	if ( _listener_disk ) {
-		_listener_disk->close();
-		delete _listener_disk;
-		_listener_disk = nullptr;
-	}
-
-	if ( _binwrite_handle != -1 ) {
-		close( _binwrite_handle);
-		_binwrite_handle = -1;
-	}
-
-	_status &= ~(CN_ULISTENING_MEM | CN_ULISTENING_DISK | CN_ULISTENING_BINARY);
-
-	if ( M ) {
-		M->unregister_listener( this);
-		if ( M->verbosely > 4 )
-			fprintf( stderr, "Unit \"%s\" not listening now\n", _label);
-	}
-
-}
-
-
-
-
-void
-cnrun::C_BaseUnit::
-tell()
-{
-	if ( _binwrite_handle != -1 && !(_status & CN_ULISTENING_DEFERWRITE) ) {
-		if ( write( _binwrite_handle, &M->V[0], sizeof(double)) < 1 ||
-		     write( _binwrite_handle, &var_value(0),
-			    sizeof(double) * ((_status & CN_ULISTENING_1VARONLY) ? 1 : v_no())) < 1 )
-			fprintf( stderr, "write() failed in tell() for \"%s\"\n", _label);
-	}
-
-	if ( _listener_disk && !(_status & CN_ULISTENING_DEFERWRITE) ) {
-		*_listener_disk << model_time();
-		if ( _status & CN_ULISTENING_1VARONLY )
-			*_listener_disk << "\t" << var_value(0);
-		else
-			for ( size_t v = 0; v < v_no(); ++v )
-				*_listener_disk << "\t" << var_value(v);
-		*_listener_disk << endl;
-	}
-
-	if ( _listener_mem ) {
-//		_listener_mem->push_back( 999);
-		_listener_mem->push_back( model_time());
-		if ( _status & CN_ULISTENING_1VARONLY )
-			_listener_mem->push_back( var_value(0));
-		else
-			for ( size_t v = 0; v < v_no(); ++v )
-				_listener_mem->push_back( var_value(v));
-	}
-}
-
-
-
-
-
-
-void
-cnrun::C_BaseUnit::
-dump( bool with_params, FILE *strm) const
-{
-	fprintf( strm, "[%lu] (%s) \"%s\"\n", _serial_id, species(), _label);
-
-	if ( with_params ) {
-		fprintf( strm, "    Pp: ");
-		for ( size_t p = 0; p < p_no(); ++p )
-			if ( *param_sym(p) != '.' || M->verbosely > 5 )
-				fprintf( strm, "%s = %g; ", param_sym(p), get_param_value(p));
-		fprintf( strm, "\n");
-	}
-	fprintf( strm, "    Vv: ");
-	for ( size_t v = 0; v < v_no(); ++v )
-		if ( *var_sym(v) != '.' || M->verbosely > 5 )
-			fprintf( strm, "%s = %g; ", var_sym(v), get_var_value(v));
-	fprintf( strm, "\n");
-
-	if ( sources.size() ) {
-		fprintf( strm, "   has sources:  ");
-		for ( auto &S : sources )
-			fprintf( strm, "%s << %s;  ",
-				 (S.sink_type == SINK_PARAM) ? param_sym(S.idx) : var_sym(S.idx),
-				 S.source->name.c_str());
-		fprintf( strm, "\n");
-	}
-
-	if ( is_listening() ) {
-		fprintf( strm, "   listening to %s%s%s\n",
-			 _listener_mem ? "mem" : "",
-			 _listener_mem && _listener_disk ? ", " : "",
-			 _listener_disk ? "disk" : "");
-	}
-}
-
-
-
-
-
-
-// source interface
-
-void
-cnrun::C_BaseUnit::
-detach_source( C_BaseSource *s, TSinkType sink_type, unsigned short idx)
-{
-	list <SSourceInterface <C_BaseSource> >::iterator K;
-	while ( (K = find( sources.begin(), sources.end(),
-			   SSourceInterface<C_BaseSource> (s, sink_type, idx))) != sources.end() )
-		sources.erase( K);
-	M->unregister_unit_with_sources(this);
-}
-
-
-void
-cnrun::C_BaseUnit::
-apprise_from_sources()
-{
-	for ( auto &S : sources )
-		switch ( S.sink_type ) {
-		case SINK_PARAM:
-//			printf( "apprise_from_sources() for %s{%d} = %g\n", _label, S->idx, (*S->source)( model_time()));
-			param_value( S.idx) = (*S.source)( model_time());
-			param_changed_hook();
-		    break;
-		case SINK_VAR:
-			var_value( S.idx) = (*S.source)( model_time());
-		    break;
-		}
-}
-
-cnrun::C_BaseUnit::
-~C_BaseUnit()
-{
-	if ( M && M->verbosely > 5 )
-		fprintf( stderr, "   deleting base unit \"%s\"\n", _label);
-
-	if ( is_listening() ) {
-		stop_listening();
-		if ( M && M->model_time() == 0. )
-		      // nothing has been written yet, delete the files on disk
-			unlink( (string(_label) + ".var").c_str());
-	}
-	if ( M )
-		M->exclude_unit( this, false);
-}
-
-
-
-
-
-
-
-
-// ----- C_BaseNeuron
-
-
-bool
-cnrun::C_BaseNeuron::
-connects_to( const C_BaseNeuron &to) const
-{
-	for ( auto &A : _axonal_harbour )
-		if ( A->has_target( &to) )
-			return true;
-	return false;
-}
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseNeuron::
-connects_via( C_BaseNeuron &to,
-	      SCleft::mapped_type *g_ptr) const
-{
-	for ( auto &A : _axonal_harbour )
-		if ( A->has_target( &to) ) {
-			if ( g_ptr ) *g_ptr = to._dendrites[A];
-			return A;
-		}
-	if ( g_ptr ) *g_ptr = NAN;
-	return nullptr;
-}
-
-
-
-
-
-
-void
-cnrun::C_BaseNeuron::
-dump( bool with_params, FILE *strm) const
-{
-	C_BaseUnit::dump( with_params, strm);
-	if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_IDLE) )
-		fprintf( strm, "   logging spikes at %g:%g\n", _spikelogger_agent->sample_period, _spikelogger_agent->sigma);
-	fprintf( strm, "\n");
-
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseNeuron::
-~C_BaseNeuron()
-{
-	if ( M && M->verbosely > 4 )
-		fprintf( stderr, "  deleting base neuron \"%s\"\n", _label);
-
-      // kill all efferents
-	for ( auto Y = _axonal_harbour.rbegin(); Y != _axonal_harbour.rend(); ++Y ) {
-		(*Y) -> _source = nullptr;
-		delete (*Y);
-	}
-      // unlink ourselves from all afferents
-	for ( auto Y = _dendrites.rbegin(); Y != _dendrites.rend(); ++Y )
-		Y->first->_targets.remove( this);
-
-	if ( _spikelogger_agent ) {
-		if ( M && !(_spikelogger_agent->_status & CN_KL_IDLE) )
-			M->unregister_spikelogger( this);
-		delete _spikelogger_agent;
-		_spikelogger_agent = nullptr;
-	}
-}
-
-
-
-
-
-
-// --- SSpikeloggerService
-
-double
-cnrun::SSpikeloggerService::
-sdf( double at, double sample_width, double sigma, unsigned *nspikes) const
-{
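-      // What this computes: a Gaussian-kernel spike density estimate,
-      //     SDF(at) = sum of exp( -(t_i - at)^2 / sigma^2 )
-      //               over spikes t_i with |t_i - at| <= sample_width/2
-      // (any kernel normalisation is left to the caller).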
-	if ( nspikes )
-		*nspikes = 0;
-
-	double	dt,
-		result = 0.;
-	for ( auto &T : spike_history ) {
-		dt = T - at;
-		if ( dt < -sample_width/2. )
-			continue;
-		if ( dt >  sample_width/2. )
-			break;
-		if ( nspikes )
-			(*nspikes)++;
-		result += exp( -dt*dt/(sigma * sigma));
-	}
-	return result;
-}
-
-
-
-double
-cnrun::SSpikeloggerService::
-shf( double at, double sample_width) const
-{
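-      // What this computes: the standard deviation of the inter-spike
-      // intervals falling within sample_width around `at'; returns 0
-      // when fewer than three intervals are available.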
-	double	dt,
-		last_spike_at;
-	vector<double> intervals;
-	bool	counted_one = false;
-	for ( auto &T : spike_history ) {
-		dt = T - at;
-		if ( dt < -sample_width/2. )
-			continue;
-		if ( dt >  sample_width/2. )
-			break;
-
-		if ( counted_one )
-			intervals.push_back( last_spike_at - T);
-		else
-			counted_one = true;
-
-		last_spike_at = T;
-	}
-
-	return (intervals.size() < 3) ? 0 : gsl_stats_sd( intervals.data(), 1, intervals.size());
-}
-
-
-
-
-
-size_t
-cnrun::SSpikeloggerService::
-get_sxf_vector_custom( vector<double> *sdf_buffer, vector<double> *shf_buffer,
-		       vector<unsigned> *nspikes_buffer,
-		       double sample_period_custom, double sigma_custom,
-		       double from, double to) const
-{
-	if ( to == 0. )
-		to = _client->M->model_time();
-
-	if ( sdf_buffer )	sdf_buffer->clear();
-	if ( shf_buffer )	shf_buffer->clear();
-	if ( nspikes_buffer)	nspikes_buffer->clear();
-
-	for ( double t = from; t <= to; t += sample_period_custom ) {
-		unsigned nspikes = 0;
-		double	sdf_value = sdf( t, sample_period_custom, sigma_custom, &nspikes);
-		if ( sdf_buffer )	sdf_buffer->push_back( sdf_value);
-		if ( shf_buffer )	shf_buffer->push_back( shf( t, sample_period_custom));
-		if ( nspikes_buffer )	nspikes_buffer->push_back( nspikes);
-	}
-
-	return (to - from) / sample_period_custom;
-}
-
-
-
-
-
-
-
-void
-cnrun::SSpikeloggerService::
-sync_history()
-{
-	if ( !_client->M || (_client->M->_status & CN_MDL_DISKLESS) )
-		return;
-
-	ofstream spikecnt_strm( (string(_client->_label) + ".spikes").c_str());
-	spikecnt_strm.precision( _client->precision);
-	spikecnt_strm << "#spike time\n";
-
-	for ( auto &V : spike_history )
-		spikecnt_strm << V << endl;
-
-	if ( _status & CN_KL_COMPUTESDF ) {
-		ofstream sdf_strm( (string(_client->_label) + ".sxf").c_str());
-		sdf_strm.precision( _client->precision);
-		sdf_strm << "#<time>\t<sdf>\t<shf>\t<nspikes>\n";
-
-		vector<double> sdf_vector, shf_vector;
-		vector<unsigned> nspikes_vector;
-		get_sxf_vector( &sdf_vector, &shf_vector, &nspikes_vector,
-				start_delay, 0);
-
-		double t = start_delay;
-		for ( size_t i = 0; i < sdf_vector.size(); ++i, t += sample_period )
-			sdf_strm << t << "\t"
-				 << sdf_vector[i] << "\t"
-				 << shf_vector[i] << "\t"
-				 << nspikes_vector[i] << endl;
-	}
-}
-
-
-
-
-
-size_t
-cnrun::SSpikeloggerService::
-n_spikes_since( double since) const
-{
-	size_t i = 0;
-	for ( auto &K : spike_history ) {
-	      // count spikes at or before `since', return the rest
-		if ( K > since )
-			return spike_history.size() - i;
-		++i;
-	}
-	return 0;
-}
-
-
-
-
-
-// ----- C_BaseSynapse
-
-
-cnrun::C_BaseSynapse::
-C_BaseSynapse( TUnitType intype,
-	       C_BaseNeuron *insource, C_BaseNeuron *intarget,
-	       double ing, CModel *inM, int s_mask)
-      : C_BaseUnit (intype, "overwrite-me", inM, s_mask),
-	_source (insource),
-	t_last_release_started (-INFINITY)
-{
-	if ( M && M->verbosely > 5 )
-		printf( "Creating a \"%s\" base synapse\n", species());
-	_targets.push_back( intarget);
-	intarget->_dendrites[this] = ing;
-	_source->_axonal_harbour.push_back( this);
-	snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:1", _source->_label);
-}
-
-
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseSynapse::
-clone_to_target( C_BaseNeuron *tgt, double g)
-{
-      // check if we have no existing connection already to tgt
-	if ( find( _targets.begin(), _targets.end(), tgt) != _targets.end() ) {
-		fprintf( stderr, "Neuron \"%s\" already synapsing onto \"%s\"\n",
-			 _source->_label, tgt->_label);
-		return nullptr;
-	}
-
-	tgt -> _dendrites[this] = g;
-	_targets.push_back( tgt);
-
-	snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:%zu", _source->_label, _targets.size());
-
-	return this;
-}
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseSynapse::
-make_clone_independent( C_BaseNeuron *tgt)
-{
-	double g = g_on_target( *tgt);
-	if ( !isfinite(g) || !M )
-		return nullptr;
-
-	if ( M && M->verbosely > 4 )
-		printf( "promoting a clone of %s synapse from \"%s\" to \"%s\"\n", species(), _label, tgt->_label);
-	if ( find( _targets.begin(), _targets.end(), tgt) == _targets.end() )
-		fprintf( stderr, "ебать!\n");
-	_targets.erase( find( _targets.begin(), _targets.end(), tgt));
-
-	if ( tgt->_dendrites.find(this) == tgt->_dendrites.end() )
-		fprintf( stderr, "ебать-колотить!\n");
-	tgt -> _dendrites.erase( tgt->_dendrites.find(this));
-
-	snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:%zu", _source->_label, _targets.size());
-
-	C_BaseSynapse* ret = M -> add_synapse_species( _type, _source, tgt, g, false /* prevents re-creation of a clone we have just excised */,
-						       true);
-	// the newly added synapse still has stock parameters: copy ours
-	if ( ret ) {
-		ret->P = P;
-		// also see to vars
-		for ( size_t i = 0; i < v_no(); ++i )
-			ret->var_value(i) = get_var_value(i);
-		return ret;
-	}
-	return nullptr;
-}
-
-
-
-
-
-
-void
-cnrun::C_BaseSynapse::
-dump( bool with_params, FILE *strm) const
-{
-	C_BaseUnit::dump( with_params, strm);
-	fprintf( strm, "  gsyn on targets (%zu):  ", _targets.size());
-	for ( auto &T : _targets )
-		fprintf( strm, "%s: %g;  ", T->_label, g_on_target( *T));
-	fprintf( strm, "\n\n");
-}
-
-
-
-
-
-cnrun::C_BaseSynapse::
-~C_BaseSynapse()
-{
-	if ( M && M->verbosely > 4 )
-		fprintf( stderr, "  deleting base synapse \"%s\"\n", _label);
-
-	for ( auto &T : _targets )
-		if ( T )
-			T->_dendrites.erase( this);
-
-	if ( _source ) {
-		_source->_axonal_harbour.erase(
-			find( _source->_axonal_harbour.begin(), _source->_axonal_harbour.end(), this));
-		if ( M && M->verbosely > 5 )
-			printf( "    removing ourselves from \"%s\" axonals (%zu still there)\n",
-				_source->_label, _source->_axonal_harbour.size());
-	}
-}
-
-
-
-// eof
diff --git a/upstream/src/libcn/base-unit.hh b/upstream/src/libcn/base-unit.hh
deleted file mode 100644
index ef8fb72..0000000
--- a/upstream/src/libcn/base-unit.hh
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- *
- */
-
-
-#ifndef LIBCN_BASE_UNIT_H
-#define LIBCN_BASE_UNIT_H
-
-#include <fstream>
-#include <cstring>
-#include <vector>
-#include <list>
-
-#include "types.hh"
-#include "sources.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-namespace cnrun {
-
-// this gets referenced in case of out-of-bounds idx or misspelled sym
-// in param accessors
-extern double __cn_dummy_double;
-
-
-
-// forward decls
-class CModel;
-class C_BaseUnit;
-
-
-
-
-
-
-// for all units
-#define CN_UERROR			(1 << 0)
-#define CN_UOWNED			(1 << 1)
-#define CN_UHASPARAMRANGE		(1 << 2)
-#define CN_ULISTENING_MEM		(1 << 3)
-#define CN_ULISTENING_DISK		(1 << 4)
-#define CN_ULISTENING_1VARONLY		(1 << 5)
-#define CN_ULISTENING_DEFERWRITE	(1 << 6)
-#define CN_ULISTENING_BINARY		(1 << 7)
-//#define CN_NDYNPARAMS			(1 << 8)
-
-// only for neurons
-#define CN_NFIRING			(1 << 9)  // firing now
-#define CN_NREFRACT			(1 << 10)  // in refractory phase now
-
-
-#define CN_MAX_LABEL_SIZE 40
-
-
-// the base unit provides the methods for the following:
-// * classification;
-// * access to parameters, tape reader and range interface;
-// * attachment to the mother model;
-// * listening, i.e., keeping a history of vars along a timeline;
-class C_BaseUnit {
-
-    private:
-	C_BaseUnit();  // not callable
-
-    protected:
-	TUnitType
-		_type;  // will look up p, pno and vno from __CNUDT using _type as index
-    public:
-	TUnitType
-		type() const		{  return _type;   }
-
-      // classification
-	const int	traits()	const {  return __CNUDT[_type].traits;			}
-	const bool	is_hostable()	const {  return __CNUDT[_type].traits & UT_HOSTED;		}
-	const bool	is_ddtbound()	const {  return __CNUDT[_type].traits & UT_DDTSET;		}
-	const bool	is_neuron()	const {  return _type >= NT_FIRST && _type <= NT_LAST;	}
-	const bool	is_synapse()	const {  return _type >= YT_FIRST && _type <= YT_LAST;	}
-	const bool	is_oscillator()	const {  return __CNUDT[_type].traits & UT_OSCILLATOR;	}
-	const bool	is_conscious()	const {  return is_oscillator();				}
-
-	const char *class_name() const
-		{  return is_neuron() ? "Neuron" : "Synapse";	}
-	const char *species() const
-		{  return __CNUDT[_type].species;		}
-	const char *family() const
-		{  return __CNUDT[_type].family;		}
-	const char *type_description() const
-		{  return __CNUDT[_type].description;		}
-
-      // parameter & variable names and symbols
-	const char *const param_name( size_t i)	const { return __CNUDT[_type].stock_param_names[i]; }
-	const char *const param_sym( size_t i)	const { return __CNUDT[_type].stock_param_syms[i];  }
-	int param_idx_by_sym( const char*) const __attribute__ ((pure));
-	const char *const var_name( size_t i)	const { return __CNUDT[_type].stock_var_names[i];   }
-	const char *const var_sym( size_t i)	const { return __CNUDT[_type].stock_var_syms[i];    }
-	int var_idx_by_sym( const char*) const __attribute__ ((pure));
-	unsigned short v_no() const	{ return __CNUDT[_type].vno; }
-	unsigned short p_no() const	{ return __CNUDT[_type].pno; }
-
-    protected:
-      // identification
-	unsigned long
-		_serial_id;  // assigned incrementally as read by import_NetworkML
-	char	_label[CN_MAX_LABEL_SIZE];
-    public:
-	unsigned long serial() const
-		{  return _serial_id;  }
-	const char *label() const  // for synapses, it is "%s:%zu", src->label, targets.size()
-		{  return _label;  }
-	void set_label( const char *new_label)
-		{  strncpy( _label, new_label, CN_MAX_LABEL_SIZE-1); }
-
-      // status bitfield & properties
-    protected:
-	int	_status;
-    public:
-	int	status()	{  return _status; }
-
-      // ctor & dtor
-    protected:
-	C_BaseUnit( TUnitType, const char *label,
-		    CModel*, int s_mask);
-    public:
-	virtual ~C_BaseUnit();  // surely virtual
-
-      // parent model
-	friend class CModel;
-	friend class SSpikeloggerService;
-    protected:
-	CModel	*M;
-    public:
-	const CModel&
-		parent_model() const	{ return *M; }
-	bool	is_owned() const	{ return _status & CN_UOWNED; }
-	const double&
-		model_time() const;  // defined in model.h
-
-    public:
-      // private copy of params
-	vector<double> P;
-	double get_param_value( size_t p) const
-		{  return P[p];  }
-	double get_param_value( const char *sym) const
-		{
-			int id = param_idx_by_sym( sym);
-			return (id == -1) ? __cn_dummy_double : P[id];
-		}
-	double &param_value( size_t p)	{  return P[p];  }
-	double &param_value( const char *sym)
-		{
-			int id = param_idx_by_sym( sym);
-			return (id == -1) ? __cn_dummy_double : P[id];
-		}
-	void reset_params()
-		{
-			P.resize( p_no());
-			memcpy( P.data(), __CNUDT[_type].stock_param_values,
-				sizeof(double) * p_no());
-			param_changed_hook();
-		}
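-
-      // Accessor sketch (hypothetical call site, for illustration only):
-      //     double g = u.get_param_value( "gNa");  // look up by symbol
-      //     u.param_value( "Idc") = .5;            // assign in place
-      //     u.param_changed_hook();                // let the unit react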
-
-      // purity checks
-	bool is_not_altered() const
-		{
-			return (memcmp( P.data(), __CNUDT[_type].stock_param_values,
-				       sizeof (double) * p_no()) == 0) &&
-				!has_sources();
-		}
-	bool has_same_params( const C_BaseUnit &rv) const
-		{
-			return _type == rv._type &&
-				memcmp( P.data(), rv.P.data(), sizeof (double) * p_no()) == 0;
-		}
-	bool has_sources() const __attribute__ ((pure))
-		{
-			return not sources.empty();
-		}
-	bool has_same_sources( const C_BaseUnit &rv) const __attribute__ ((pure))
-		{
-			return sources == rv.sources;
-			// not sure whether the order of otherwise identical sources should matter
-		}
-	bool is_identical( const C_BaseUnit &rv) const __attribute__ ((pure))
-		{
-			return	_type == rv._type && has_same_params(rv) &&
-				((has_sources() && has_same_sources(rv)) ||
-				 (!has_sources() && !rv.has_sources()));
-		}
-	virtual void dump( bool with_params = false, FILE *strm = stdout) const;
-
-
-      // Source interface
-	enum TSinkType { SINK_PARAM, SINK_VAR };
-
-	template <class T>
-	struct SSourceInterface {
-	    friend class C_BaseUnit;
-	    friend class CModel;
-	    private:
-		C_BaseSource *source;
-		TSinkType sink_type;
-		unsigned short idx;
-
-		SSourceInterface( T *insource, TSinkType insink_type, unsigned short inidx)
-		      : source (insource), sink_type (insink_type), idx (inidx)
-			{}
-	    public:
-		bool operator== ( const SSourceInterface &rv) const
-			{  return source == rv.source && sink_type == rv.sink_type && idx == rv.idx;  }
-	};
-	list <SSourceInterface <C_BaseSource> > sources;
-	template <class T> void attach_source( T *s, TSinkType t, unsigned short idx);
-
-	void detach_source( C_BaseSource*, TSinkType, unsigned short idx);
-
-	void apprise_from_sources();
-	virtual void param_changed_hook()
-		{}
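-
-      // Wiring sketch (the names `u' and `tape' are assumed, for
-      // illustration only):
-      //     u.attach_source( tape, C_BaseUnit::SINK_PARAM,
-      //                      u.param_idx_by_sym( "Idc"));
-      // whereafter apprise_from_sources() samples the source at
-      // model_time() into that parameter.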
-
-
-      // access to state variables: differs per hosted or standalone
-	virtual double &var_value( size_t) = 0;
-	virtual const double &get_var_value( size_t) const = 0;
-	virtual void reset_vars() = 0;
-	virtual void reset_state();
-
-      // state history
-	bool is_listening() const
-		{
-			return _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM);
-		}
-	void start_listening( int mask = 0 | CN_ULISTENING_DISK);
-	void stop_listening();
-	void restart_listening()
-		{
-			int lbits = _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM
-					       | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE);
-			stop_listening();
-			start_listening( lbits);
-		}
-	void pause_listening();
-	void resume_listening();
-
-    private:
-      // where vars are written by tell()
-	int _binwrite_handle;
-	ofstream *_listener_disk;
-      // ... and/or stored, in a diskless model
-	vector<double> *_listener_mem;
-    public:
-      // by this method
-	void tell();
-	const vector<double> *listener_mem() const	{ return _listener_mem; }
-
-	unsigned short precision;
-
-      // one common method for all descendants
-};
-
-
-
-extern unsigned short __cn_default_unit_precision;
-
-extern int __cn_verbosely;
-
-
-
-
-class __C_BaseUnitCompareByLabel {
-    public:
-	bool operator () ( C_BaseUnit *&lv, C_BaseUnit *&rv)
-		{
-			return strcmp( lv->label(), rv->label()) < 0;
-		}
-};
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/hosted-attr.hh b/upstream/src/libcn/hosted-attr.hh
deleted file mode 100644
index d971578..0000000
--- a/upstream/src/libcn/hosted-attr.hh
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-03-31
- *
- */
-
-
-#ifndef LIBCN_HOSTED_ATTR_H
-#define LIBCN_HOSTED_ATTR_H
-
-#include <vector>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-namespace cnrun {
-
-class C_HostedAttributes {
-
-    friend class CIntegrateRK65;
-
-    protected:
-	C_HostedAttributes()
-		{}
-
-    friend class CModel;
-      // variables for units in the model are catenated on a single
-      // vector<double>, as an essential optimization measure; each
-      // unit knows its own set of variables by this idx:
-	size_t idx;
-      // the containing model provides idx on registering our unit
-
-    public:
-	virtual void reset_vars() = 0;
-	virtual double &var_value( size_t) = 0;
-
-	virtual void derivative( vector<double>&, vector<double>&) = 0;
-};
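-
-// Reading of the above, for orientation (the exact layout is CModel's
-// business): a hosted unit's k-th state variable lives at V[idx + k] of
-// the model-wide variable vector passed to derivative().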
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/hosted-neurons.cc b/upstream/src/libcn/hosted-neurons.cc
deleted file mode 100644
index 2a036e0..0000000
--- a/upstream/src/libcn/hosted-neurons.cc
+++ /dev/null
@@ -1,771 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-10-16
- *
- */
-
-
-#include <cmath>
-#include <iostream>
-
-#include "libstilton/lang.hh"
-
-#include "param-unit-literals.hh"
-#include "types.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-cnrun::C_HostedNeuron::
-C_HostedNeuron( TUnitType intype, const char *inlabel,
-		double inx, double iny, double inz,
-		CModel* inM, int s_mask,
-		bool do_allocations_immediately)
-      : C_BaseNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
-{
-	if ( M )
-		M->include_unit( this, do_allocations_immediately);
-	else {
-//		_status &= ~CN_UENABLED;
-		idx = (unsigned long)-1;
-	}
-}
-
-
-// C_HostedNeuron::~C_HostedNeuron()
-// {
-// 	if ( __cn_verbosely > 5 )
-// 		cout << " deleting hosted neuron " << label << endl;
-// }
-
-
-
-
-
-
-void
-cnrun::C_HostedConductanceBasedNeuron::
-do_detect_spike_or_whatever()
-{
-	if ( unlikely (E() >= M->spike_threshold) ) {
-		if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) ) {
-			_spikelogger_agent->spike_history.push_back(
-				_spikelogger_agent->t_last_spike_start = model_time());
-			_spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
-		}
-	} else
-//		if ( model_time() - t_last_spike_end > M->spike_lapse ) {
-		if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
-			_spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
-			_spikelogger_agent->t_last_spike_end = model_time();
-		}
-}
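-
-// In words: a spike "starts" on the first integration step where E()
-// reaches M->spike_threshold and "ends" on the first step it falls back
-// below; only onset times are pushed onto spike_history.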
-
-
-
-
-
-
-
-
-// SPECIFIC NEURONS:
-
-// ===== HH and variations
-
-const char* const cnrun::__CN_ParamNames_NeuronHH_d[] = {
-	"Na conductance, " __CN_PU_CONDUCTANCE,
-	"Na equi potential, " __CN_PU_POTENTIAL,
-	"K conductance, " __CN_PU_CONDUCTANCE,
-	"K equi potential, " __CN_PU_POTENTIAL,
-	"Leak conductance, " __CN_PU_CONDUCTANCE,
-	"Leak equi potential, " __CN_PU_POTENTIAL,
-	"Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
-
-	".alpha_m_a",	".alpha_m_b",	".alpha_m_c",	".beta_m_a",	".beta_m_b",	".beta_m_c",
-	".alpha_h_a",	".alpha_h_b",	".alpha_h_c",	".beta_h_a",	".beta_h_b",	".beta_h_c",
-	".alpha_n_a",	".alpha_n_b",	".alpha_n_c",	".beta_n_a",	".beta_n_b",	".beta_n_c",
-
-	"Externally applied DC, " __CN_PU_CURRENT,
-};
-const char* const cnrun::__CN_ParamSyms_NeuronHH_d[] = {
-	"gNa",
-	"ENa",
-	"gK",
-	"EK",
-	"gl",
-	"El",
-	"Cmem",
-
-	".alpha_m_a",	".alpha_m_b",	".alpha_m_c",	".beta_m_a",	".beta_m_b",	".beta_m_c",
-	".alpha_h_a",	".alpha_h_b",	".alpha_h_c",	".beta_h_a",	".beta_h_b",	".beta_h_c",
-	".alpha_n_a",	".alpha_n_b",	".alpha_n_c",	".beta_n_a",	".beta_n_b",	".beta_n_c",
-
-	"Idc",
-};
-const double cnrun::__CN_Params_NeuronHH_d[] = {
-	7.15,		//   gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,		//   ENa: Na equi potential in mV
-	1.430,		//   gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,		//   EK: K equi potential in mV
-	0.0267,		//   gl: leak conductance in 1/(mOhms * cm^2)
-      -63.563,		//   El: leak equi potential in mV
-	0.143,		//   Cmem: membr. specific capacitance, muF/cm^2
-
-	0.32,   52.,   4.,
-	0.28,   25.,   5.,
-	0.128,  48.,  18.,
-	4.0,    25.,   5.,
-	0.032,  50.,   5.,
-	0.5,    55.,  40.,
-
-	  0.		// Externally applied constant current
-};
-
-
-
-
-const double cnrun::__CN_Vars_NeuronHH_d[] = {
-	-66.81,		// 0 - membrane potential E
-	  0.023,	// 1 - prob. for Na channel activation m
-	  0.800,	// 2 - prob. for not Na channel blocking h
-	  0.220,	// 3 - prob. for K channel activation n
-};
-
-const char* const cnrun::__CN_VarNames_NeuronHH_d[] = {
-	"Membrane potential, " __CN_PU_POTENTIAL,
-	"Prob. of Na channel activation",
-	"1-Prob. of Na channel blocking",
-	"Prob. of K channel activation",
-};
-const char* const cnrun::__CN_VarSyms_NeuronHH_d[] = {
-	"E",
-	".m",
-	".h",
-	".n"
-};
-
-
-
-void
-__attribute__ ((hot))
-cnrun::CNeuronHH_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-      // differential eqn for E, the membrane potential
-	dE(dx) = (
-		     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
-		   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
-		   + P[gl]                           * (P[El]  - E(x)) + (Isyn(x) + P[Idc])
-		  ) / P[Cmem];
-
-	double _a, _b, K;
-      // differential eqn for m, the probability for one Na channel activation
-      // particle
-	K = -P[alpha_m_b] - E(x),
-		_a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-//	_a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-	K =  P[beta_m_b] + E(x),
-		_b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
-//	_b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-	dm(dx) = _a * (1 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking
-      // particle to be absent
-	K = -P[alpha_h_b] - E(x),
-		_a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-//	_a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-	K = -P[beta_h_b] - E(x),
-		_b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-//	_b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-	dh(dx) = _a * (1 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation
-      // particle
-	K = -P[alpha_n_b] - E(x),
-		_a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-//	_a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
-	K = -P[beta_n_b] - E(x),
-		_b = P[beta_n_a] * exp( K / P[beta_n_c]);
-//	_b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-	dn(dx)= _a * (1 - n(x)) -_b * n(x);
-}
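-
-// The gating rates above are built from the (.alpha_*/.beta_*) parameter
-// triples (a, b, c): with K = -b - E (K = b + E for beta_m), the rational
-// rates use a*K/expm1(K/c), the exponential ones a*exp(K/c), and beta_h
-// the logistic a/(exp(K/c) + 1); the commented-out lines preserve the
-// hard-coded constants these forms generalise.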
-
-// void
-// CNeuronHH::derivative( vector<double>& x, vector<double>& dx)
-// {
-//	enum TParametersNeuronHH {
-//		gNa, ENa, gK,  EK, gl, El, Cmem, Idc
-//	};
-
-//       // differential eqn for E, the membrane potential
-//	dE(dx) = (
-//		   P[gNa] * ___pow3(m(x)) * h(x) * (P[ENa] - E(x))
-//		 + P[gK]  * ___pow4(n(x))        * (P[EK]  - E(x))
-//		 + P[gl]  *                        (P[El]  - E(x))  + (Isyn(x) + P[Idc])
-//		 ) / P[Cmem];
-
-//	double _a, _b;
-//       // differential eqn for m, the probability for Na channel activation
-//	_a = (3.5 + 0.1 * E(x)) / -expm1( -3.5 - 0.1 * E(x));
-//	_b = 4.0 * exp( -(E(x) + 60.0) / 18.0);
-//	dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
-
-//       // differential eqn for h, the probability for Na channel inactivation
-//	_a = 0.07 * exp( -E(x) / 20.0 - 3.0);
-//	_b = 1.0 / (exp( -3.0 - 0.1 * E(x)) + 1.0);
-//	dh(dx) = _a * (1.0 - h(x)) -_b * h(x);
-
-//       // differential eqn for n, the probability for K channel activation
-//	_a = (-0.5 - 0.01 * E(x)) / expm1( -5.0 - 0.1 * E(x));
-//	_b = 0.125 * exp( -(E(x) + 60.0) / 80.0);
-//	dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-// }
-
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_NeuronHH2_d[] = {
-	"Na conductance, " __CN_PU_CONDUCTANCE,
-	"Na equi potential, " __CN_PU_POTENTIAL,
-	"K conductance, " __CN_PU_CONDUCTANCE,
-	"K equi potential, " __CN_PU_POTENTIAL,
-	"Leak conductance, " __CN_PU_CONDUCTANCE,
-	"Leak equi potential, " __CN_PU_POTENTIAL,
-	"Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
-	"K leakage conductance, " __CN_PU_CONDUCTANCE,
-	"K leakage equi potential, " __CN_PU_POTENTIAL,
-
-	".alpha_m_a",	".alpha_m_b",	".alpha_m_c",	".beta_m_a",	".beta_m_b",	".beta_m_c",
-	".alpha_h_a",	".alpha_h_b",	".alpha_h_c",	".beta_h_a",	".beta_h_b",	".beta_h_c",
-	".alpha_n_a",	".alpha_n_b",	".alpha_n_c",	".beta_n_a",	".beta_n_b",	".beta_n_c",
-
-//	"Total equi potential (?), " __CN_PU_POTENTIAL,
-
-	"Externally applied DC, " __CN_PU_CURRENT,
-};
-const char* const cnrun::__CN_ParamSyms_NeuronHH2_d[] = {
-	"gNa",
-	"ENa",
-	"gK",
-	"EK",
-	"gl",
-	"El",
-	"Cmem",
-	"gKl",
-	"EKl",
-
-	".alpha_m_a",	".alpha_m_b",	".alpha_m_c",	".beta_m_a",	".beta_m_b",	".beta_m_c",
-	".alpha_h_a",	".alpha_h_b",	".alpha_h_c",	".beta_h_a",	".beta_h_b",	".beta_h_c",
-	".alpha_n_a",	".alpha_n_b",	".alpha_n_c",	".beta_n_a",	".beta_n_b",	".beta_n_c",
-
-//	"V0",
-
-	"Idc",
-};
-const double cnrun::__CN_Params_NeuronHH2_d[] = {
-	7.15,		//   gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,		//   ENa: Na equi potential in mV
-	1.43,		//   gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,		//   EK: K equi potential in mV
-	0.0267,		//   gl: leak conductance in 1/(mOhms * cm^2)
-      -63.56,		//   El: leak equi potential in mV
-	0.143,		//   Cmem: membr. specific capacitance, muF/cm^2
-	0.00572,	//   gKl: potassium leakage conductivity
-      -95.0,		//   EKl: potassium leakage equi pot in mV
-
-	0.32,   52.,   4.,
-	0.28,   25.,   5.,
-	0.128,  48.,  18.,
-	4.0,    25.,   5.,
-	0.032,  50.,   5.,
-	0.5,    55.,  40.,
-
-//       65.0,		//   V0: ~ total equi potential (?)
-
-	0.,		//   Idc: constant, externally applied current
-};
-
-
-const double cnrun::__CN_Vars_NeuronHH2_d[] = {
-// as in a single-neuron run
-      -66.56,	// 0 - membrane potential E
-	0.0217,	// 1 - prob. for Na channel activation m
-	0.993,	// 2 - prob. for not Na channel blocking h
-	0.051,	// 3 - prob. for K channel activation n
-
-// previously thought to be resting state values
-//      -60.0,		// 0 - membrane potential E
-//	0.0529324,	// 1 - prob. for Na channel activation m
-//	0.3176767,	// 2 - prob. for not Na channel blocking h
-//	0.5961207,	// 3 - prob. for K channel activation n
-};
-
-
-
-
-
-void
-cnrun::CNeuronHH2_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	enum TParametersNeuronHH2 {
-		gNa, ENa, gK,  EK, gl, El, Cmem,
-		gKl, EKl, //V0,
-		alpha_m_a,	alpha_m_b,	alpha_m_c,
-		beta_m_a,	beta_m_b,	beta_m_c,
-		alpha_h_a,	alpha_h_b,	alpha_h_c,
-		beta_h_a,	beta_h_b,	beta_h_c,
-		alpha_n_a,	alpha_n_b,	alpha_n_c,
-		beta_n_a,	beta_n_b,	beta_n_c,
-		Idc,
-	};
-
-      // differential eqn for E, the membrane potential
-	dE(dx) = (
-		     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
-		   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
-		   + P[gl]                           * (P[El]  - E(x))
-		   + P[gKl]                          * (P[EKl] - E(x)) + (Isyn(x) + P[Idc])
-		  ) / P[Cmem];
-
-	double _a, _b, K;
-      // differential eqn for m, the probability for one Na channel activation
-      // particle
-	K = -P[alpha_m_b] - E(x),
-		_a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-//	_a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-	K =  P[beta_m_b] + E(x),
-		_b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
-//	_b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-	dm(dx) = _a * (1 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking
-      // particle to be absent
-	K = -P[alpha_h_b] - E(x),
-		_a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-//	_a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-	K = -P[beta_h_b] - E(x),
-		_b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-//	_b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-	dh(dx) = _a * (1 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation
-      // particle
-	K = -P[alpha_n_b] - E(x),
-		_a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-//	_a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
-	K = -P[beta_n_b] - E(x),
-		_b = P[beta_n_a] * exp( K / P[beta_n_c]);
-//	_b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-	dn(dx)= _a * (1 - n(x)) -_b * n(x);
-}
-
-
-
-
-
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-
-const char* const cnrun::__CN_ParamNames_NeuronEC_d[] = {
-	"Na conductance, " __CN_PU_CONDUCTANCE,
-	"Na equi potential, " __CN_PU_POTENTIAL,
-	"K conductance, " __CN_PU_CONDUCTANCE,
-	"K equi potential, " __CN_PU_POTENTIAL,
-	"Leak conductance, " __CN_PU_CONDUCTANCE,
-	"Leak equi potential, " __CN_PU_POTENTIAL,
-	"Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
-	"Externally applied DC, " __CN_PU_CURRENT,
-	"K leakage conductance, " __CN_PU_CONDUCTANCE,
-	"K leakage equi potential, " __CN_PU_POTENTIAL,
-	"Total equi potential, " __CN_PU_POTENTIAL,
-	"gh1",
-	"gh2",
-	"Vh, " __CN_PU_POTENTIAL
-};
-const char* const cnrun::__CN_ParamSyms_NeuronEC_d[] = {
-	"gNa",
-	"ENa",
-	"gK",
-	"EK",
-	"gl",
-	"El",
-	"Cmem",
-	"Idc",
-	"gKl",
-	"EKl",
-	"V0",
-	"gh1",
-	"gh2",
-	"Vh"
-};
-const double cnrun::__CN_Params_NeuronEC_d[] = {
-	7.15,	//  0 - gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,	//  1 - ENa: Na equi potential in mV
-	1.43,	//  2 - gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,	//  3 - EK: K equi potential in mV
-	0.021,	//  4 - gl: leak conductance in 1/(mOhms * cm^2)
-      -55.0,	//  5 - El: leak equi potential in mV
-	0.286,	//  6 - Cmem: membr. capacity density in muF/cm^2 // 0.143
-	0.,	//  7 - Externally applied constant current
-	0.035,	//  8 - gKl: potassium leakage conductivity
-      -95.0,	//  9 - EKl: potassium leakage equi pot in mV
-       65.0,	// 10 - V0: ~ total equi potential (?)
-	0.0185,	// 11 - gh1 // 1.85
-	0.01,	// 12 - gh2
-      -20.0,	// 13 - Vh
-};
-
-const char* const cnrun::__CN_VarNames_NeuronEC_d[] = {
-	"Membrane potential",
-	"Prob. of Na channel activation",
-	"Prob. of not Na channel blocking",
-	"Prob. of K channel activation",
-	"Ih1 activation",
-	"Ih2 activation"
-};
-const char* const cnrun::__CN_VarSyms_NeuronEC_d[] = {
-	"E",
-	".m",
-	".h",
-	".n",
-	".Ih1",
-	".Ih2"
-};
-const double cnrun::__CN_Vars_NeuronEC_d[] = {
-      -64.1251,		// 0 - membrane potential E
-	0.0176331,	// 1 - prob. for Na channel activation m
-	0.994931,	// 2 - prob. for not Na channel blocking h
-	0.0433969,	// 3 - prob. for K channel activation n
-	0.443961,	// 4 - Ih1 activation
-	0.625308	// 5 - Ih2 activation
-};
-
-
-
-
-#define _xfunc(a,b,k,V)  (((a) * (V) + (b)) / (1.0 - exp(((V) + (b)/(a)) / (k))))
-
-void
-cnrun::CNeuronEC_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	enum TParametersNeuronEC {
-		gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
-		gKl, EKl, V0,
-		gh1, gh2,
-		Vh
-	};
-
-	double _a, _b;
-      // differential eqn for E, the membrane potential
-	dE(dx) = -(gsl_pow_3( m(x)) * h(x) * P[gNa] * (E(x) - P[ENa]) +
-		 gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
-		 (Ih1(x) * P[gh1] + Ih2(x) * P[gh2]) * (E(x) - P[Vh])+
-		 P[gl] * (E(x) - P[El]) + P[gKl] * (E(x) - P[EKl]) - Isyn(x)) / P[Cmem];
-
-      // differential eqn for m, the probability for one Na channel activation particle
-	_a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-	_b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-	dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking particle to be absent
-	_a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-	_b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-	dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation particle
-	_a = 0.032 * (15.0 - E(x) - P[V0]) / expm1( (15.0 - E(x) - P[V0]) / 5.0);
-	_b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-	dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-
-      // differential equation for the Ih1 activation variable
-	_a = _xfunc (-2.89e-3, -0.445,  24.02, E(x));
-	_b = _xfunc ( 2.71e-2, -1.024, -17.40, E(x));
-	dIh1(dx) = _a * (1.0 - Ih1(x)) - _b * Ih1(x);
-
-      // differential equation for the Ih2 activation variable
-	_a = _xfunc (-3.18e-3, -0.695,  26.72, E(x));
-	_b = _xfunc ( 2.16e-2, -1.065, -14.25, E(x));
-	dIh2(dx) = _a * (1.0 - Ih2(x)) - _b * Ih2(x);
-}
-
-#undef _xfunc
-
-
-
-
-
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_NeuronECA_d[] = {
-	"Na conductance, " __CN_PU_CONDUCTANCE,
-	"Na equi potential, " __CN_PU_POTENTIAL,
-	"K conductance, " __CN_PU_CONDUCTANCE,
-	"K equi potential, " __CN_PU_POTENTIAL,
-	"Leak conductance, " __CN_PU_CONDUCTANCE,
-	"Leak equi potential, " __CN_PU_POTENTIAL,
-	"Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
-	"Externally applied DC, " __CN_PU_CURRENT,
-	"gNap",
-	"gh",
-	"Vh",
-};
-const char* const cnrun::__CN_ParamSyms_NeuronECA_d[] = {
-	"gNa",
-	"ENa",
-	"gK",
-	"EK",
-	"gl",
-	"El",
-	"Cmem",
-	"Idc",
-	"gNap",
-	"gh",
-	"Vh",
-};
-const double cnrun::__CN_Params_NeuronECA_d[] = {
-	52.0,	//  0 - Na conductance in 1/(mOhms * cm^2)
-	55.0,	//  1 - Na equi potential in mV
-	11.0,	//  2 - K conductance in 1/(mOhms * cm^2)
-       -90.0,	//  3 - K equi potential in mV
-	 0.5,	//  4 - Leak conductance in 1/(mOhms * cm^2)
-       -65.0,	//  5 - Leak equi potential in mV
-	 1.5,	//  6 - Membr. capacity density in muF/cm^2
-	 0.,	//  7 - Externally applied constant current
-	 0.5,	//  8 - gNap
-	 1.5,	//  9 - gh
-       -20.0,	// 10 - Vh
-};
-
-const char* const cnrun::__CN_VarNames_NeuronECA_d[] = {
-	"Membrane potential",
-	"Prob. of Na channel activation",
-	"Prob. of Na channel blocking",
-	"Prob. of K channel activation",
-	"mNap",
-	"Ih1 activation",
-	"Ih2 activation"
-};
-const char* const cnrun::__CN_VarSyms_NeuronECA_d[] = {
-	"E",
-	".m",
-	".h",
-	".n",
-	".mNap",
-	".Ih1",
-	".Ih2"
-};
-const double cnrun::__CN_Vars_NeuronECA_d[] = {
-      -53.77902178,	// E
-	0.0262406368,	// prob. for Na channel activation m
-	0.9461831106,	// prob. for not Na channel blocking h
-	0.1135915933,	// prob. for K channel activation n
-	0.08109646237,	// Nap
-	0.06918464221,	// Ih1 activation
-	0.09815937825	// Ih2 activation
-};
-
-
-
-void
-cnrun::CNeuronECA_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	enum TParametersNeuronECA {  // lacks SParametersNeuronEC's gKl and EKl, so derives directly from HH
-		gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
-		gNap, gh,
-		Vh
-	};
-
-      // differential eqn for E, the membrane potential
-	dE(dx) = -((gsl_pow_3( m(x)) * h(x) * P[gNa] + P[gNap] * mNap(x)) * (E(x) - P[ENa]) +
-		   gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
-		   P[gh] * (Ih1(x) * 0.65 + Ih2(x) * 0.35) * (E(x) - P[Vh]) +
-		   P[gl] * (E(x) - P[El]) - (Isyn(x) + P[Idc]) + 2.85) / P[Cmem];
-
-	double _a, _b;
-      // differential eqn for m, the probability for one Na channel activation particle
-	_a = -0.1 * (E(x) + 23) / expm1( -0.1 * (E(x) + 23));
-	_b =  4.  * exp( -(E(x) + 48) / 18);
-	dm(dx) = _a * (1. - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking particle to be absent
-	_a = 0.07 * exp( -(E(x) + 37.0) / 20.0);
-	_b = 1. / (exp( -0.1 * (E(x) + 7.)) + 1.0);
-	dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation particle
-	_a = -0.01  * (E(x) + 27) / expm1( -0.1 * (E(x) + 27));
-	_b =  0.125 * exp( -(E(x) + 37) / 80);
-	dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-
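-      // differential eqn for mNap, the persistent Na channel activation variable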
-	_a = 1. / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
-	_b = exp( -(E(x) + 38) / 6.5) / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
-	dmNap(dx) = _a * (1.0 - mNap(x)) - _b * mNap(x);
-
-      // differential equation for the Ih1 (fast) activation variable
-	_a = 1. / (1 + exp( (E(x) + 79.2) / 9.78));
-	_b = 0.51 / (exp( (E(x) - 1.7) / 10) + exp( -(E(x) + 340) / 52)) + 1;
-	dIh1(dx) = (_a - Ih1(x)) / _b;
-
-      // differential equation for the Ih2 (slow) activation variable
-	_a = 1. / (1 + exp( (E(x) + 71.3) / 7.9));
-	_b = 5.6 / (exp( (E(x) - 1.7) / 14) + exp( -(E(x) + 260) / 43)) + 1;
-	dIh2(dx) = (_a - Ih2(x)) / _b;
-}
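-
-// Note on the form of the equations above: the m/h/n gates follow the
-// two-rate form dx/dt = _a (1 - x) - _b x, whereas the Ih1/Ih2 equations
-// are integrated in steady-state/time-constant form, dx/dt = (x_inf - x) / tau,
-// with _a holding x_inf and _b the voltage-dependent tau.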
-
-
-
-
-// =========== oscillators
-
-const char* const cnrun::__CN_ParamNames_OscillatorColpitts[] = {
-	"a",
-	"g",
-	"q",
-	"\316\267"
-};
-const char* const cnrun::__CN_ParamSyms_OscillatorColpitts[] = {
-	"a",
-	"g",
-	"q",
-	"eta"
-};
-const double cnrun::__CN_Params_OscillatorColpitts[] = {
-	1.0,	// a
-	0.0797,	// g
-	0.6898,	// q
-	6.2723	// eta
-};
-
-
-const char* const cnrun::__CN_VarNames_OscillatorColpitts[] = {
-	"x0",
-	"x1",
-	"x2"
-};
-const char* const cnrun::__CN_VarSyms_OscillatorColpitts[] = {
-	"x0",
-	"x1",
-	"x2"
-};
-const double cnrun::__CN_Vars_OscillatorColpitts[] = {
-	0.02,
-	0.69,
-       -0.53
-};
-
-
-void
-cnrun::COscillatorColpitts::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	enum TParametersOscilColpitts {
-		a, g, q,
-		eta
-	};
-
-	dx0(dx) =  P[a]   *  x1(x) + Isyn(x);
-	dx1(dx) = -P[g]   * (x0(x) + x2(x)) - P[q] * x1(x);
-	dx2(dx) =  P[eta] * (x1(x) + 1.0 - exp( -x0(x)));
-//	dx[idx  ] =  p[0] *  x[idx+1] + Isyn;
-//	dx[idx+1] = -p[1] * (x[idx  ] + x[idx+2]) - p[2] * x[idx+1];
-//	dx[idx+2] =  p[3] * (x[idx+1] + 1.0 - exp(-x[idx]));
-}
-
-
-
-
-
-
-/*
-
-const char* const __CN_ParamNames_OscillatorLV[] = {
-	"Self inhibition",
-};
-const char* const __CN_ParamSyms_OscillatorLV[] = {
-	"rho_ii",
-};
-const double __CN_Params_OscillatorLV[] = {
-	1.0,	// 0 - rho_ii: "self inhibition"
-};
-
-
-const char* const __CN_VarNames_OscillatorLV[] = {
-	"Membrane potential, " __CN_PU_POTENTIAL,
-	"Firing rate"
-};
-const char* const __CN_VarSyms_OscillatorLV[] = {
-	"E",
-	"fr"
-};
-const double __CN_Vars_OscillatorLV[] = {
-	0.,	// 0 - added a place for E
-	0.1	// 1 - firing rate
-};
-
-
-*/
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_OscillatorVdPol[] = {
-	"\316\267",
-	"\317\211\302\262",
-//	"\317\203"
-};
-const char* const cnrun::__CN_ParamSyms_OscillatorVdPol[] = {
-	"eta",
-	"omegasq", // omega^2
-//	"sigma"
-};
-const double cnrun::__CN_Params_OscillatorVdPol[] = {
-	1.0,	// eta
-	0.1,	// omega^2
-//	0.0	// noise level
-};
-
-const char* const cnrun::__CN_VarNames_OscillatorVdPol[] = {
-	"Amplitude",
-	"v"
-};
-const char* const cnrun::__CN_VarSyms_OscillatorVdPol[] = {
-	"A",
-	"v"
-};
-const double cnrun::__CN_Vars_OscillatorVdPol[] = {
-	0.1,	// amplitude
-	0.0	// internal var
-};
-
-
-
-
-//#endif // CN_WANT_MORE_NEURONS
-
-
-// eof
diff --git a/upstream/src/libcn/hosted-neurons.hh b/upstream/src/libcn/hosted-neurons.hh
deleted file mode 100644
index 18c2398..0000000
--- a/upstream/src/libcn/hosted-neurons.hh
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-10-11
- *
- */
-
-
-
-#ifndef LIBCN_HOSTED_NEURONS_H
-#define LIBCN_HOSTED_NEURONS_H
-
-#include "gsl/gsl_math.h"
-
-#include "base-neuron.hh"
-#include "hosted-attr.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-namespace cnrun {
-
-class CModel;
-
-class C_HostedNeuron
-  : public C_BaseNeuron, public C_HostedAttributes {
-
-    private:
-	C_HostedNeuron();
-
-    protected:
-	C_HostedNeuron (TUnitType intype, const char *inlabel,
-			double x, double y, double z,
-			CModel*, int s_mask,
-			bool do_allocations_immediately);
-    public:
-	void reset_vars();
-	double &var_value( size_t);
-	const double &get_var_value( size_t) const;
-};
-
-
-
-
-
-class C_HostedConductanceBasedNeuron
-  : public C_HostedNeuron {
-
-    private:
-	C_HostedConductanceBasedNeuron();
-    protected:
-	C_HostedConductanceBasedNeuron (TUnitType intype, const char *inlabel,
-					double inx, double iny, double inz,
-					CModel* inM, int s_mask,
-					bool do_allocations_immediately)
-	      : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, do_allocations_immediately)
-		{}
-    public:
-
-	double  E() const; // needs access to parent model var vector, defined in model.h
-	double  E( vector<double> &b) const	{ return b[idx+0]; }
-	double& dE( vector<double> &b)		{ return b[idx+0]; }
-
-	unsigned n_spikes_in_last_dt() const;
-
-	void do_detect_spike_or_whatever();
-};
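-
-// A hosted unit's state variables live in the parent model's single
-// integration vector V, starting at the unit's offset idx: E sits at
-// V[idx+0], with gating and auxiliary variables following at idx+1,
-// idx+2, ... (see the accessor wrappers in the concrete classes below).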
-
-
-
-
-
-// for completeness' sake -- no descendants yet
-class C_HostedRateBasedNeuron
-  : public C_HostedNeuron {
-
-    private:
-	C_HostedRateBasedNeuron();
-//		{}
-    protected:
-	C_HostedRateBasedNeuron (TUnitType intype, const char *inlabel,
-				 double inx, double iny, double inz,
-				 CModel* inM, int s_mask,
-				 bool do_allocations_immediately)
-	      : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, do_allocations_immediately)
-		{}
-
-    public:
-	unsigned n_spikes_in_last_dt() const;
-};
-
-
-
-
-
-
-
-
-
-
-// Hodgkin-Huxley classic
-
-class CNeuronHH_d
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-      // parameters (since gcc 4.4, accessible from within member functions defined outside class definition, gee!)
-	enum {
-		gNa, ENa, gK,  EK, gl, El, Cmem,
-		alpha_m_a,	alpha_m_b,	alpha_m_c,	beta_m_a,	beta_m_b,	beta_m_c,
-		alpha_h_a,	alpha_h_b,	alpha_h_c,	beta_h_a,	beta_h_b,	beta_h_c,
-		alpha_n_a,	alpha_n_b,	alpha_n_c,	beta_n_a,	beta_n_b,	beta_n_c,
-		Idc,
-	};
-
-      // current state
-      // these wrappers exist mainly for code legibility in derivative(); they are not used elsewhere:
-      // for reporting, CModel accesses vars directly as V[idx+n]
-	double   m( vector<double>& b) const	{ return b[idx+1]; }
-	double   h( vector<double>& b) const	{ return b[idx+2]; }
-	double   n( vector<double>& b) const	{ return b[idx+3]; }
-	double& dm( vector<double>& b)		{ return b[idx+1]; }
-	double& dh( vector<double>& b)		{ return b[idx+2]; }
-	double& dn( vector<double>& b)		{ return b[idx+3]; }
-
-	CNeuronHH_d( const char *inlabel,
-		     double x, double y, double z,
-		     CModel *inM, int s_mask = 0,
-		     bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron (NT_HH_D, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
-};
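-
-// A minimal usage sketch (illustrative only; it assumes a fully set-up
-// CModel instance `model`):
-//
-//	CNeuronHH_d *nrn = new CNeuronHH_d( "HH.0", 0., 0., 0., &model);
-//	// with do_allocations_immediately == true (the default), the unit is
-//	// included in the model at once, and its state variables E, m, h, n
-//	// occupy model.V[nrn->idx + 0] .. model.V[nrn->idx + 3]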
-
-
-
-
-
-
-
-class CNeuronHH2_d
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-	double   m( vector<double>& b) const	{ return b[idx+1]; }
-	double   h( vector<double>& b) const	{ return b[idx+2]; }
-	double   n( vector<double>& b) const	{ return b[idx+3]; }
-	double& dm( vector<double>& b)		{ return b[idx+1]; }
-	double& dh( vector<double>& b)		{ return b[idx+2]; }
-	double& dn( vector<double>& b)		{ return b[idx+3]; }
-
-	CNeuronHH2_d( const char *inlabel,
-		      double x, double y, double z,
-		      CModel *inM, int s_mask = 0,
-		      bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron( NT_HH2_D, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-// Entorhinal cortex stellate cell
-
-class CNeuronEC_d
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-	double 	   m( vector<double>& b) const	{ return b[idx+1]; }
-	double 	   h( vector<double>& b) const	{ return b[idx+2]; }
-	double 	   n( vector<double>& b) const	{ return b[idx+3]; }
-	double 	 Ih1( vector<double>& b) const	{ return b[idx+4]; }
-	double	 Ih2( vector<double>& b) const	{ return b[idx+5]; }
-	double&   dm( vector<double>& b)	{ return b[idx+1]; }
-	double&   dh( vector<double>& b)	{ return b[idx+2]; }
-	double&   dn( vector<double>& b)	{ return b[idx+3]; }
-	double& dIh1( vector<double>& b)	{ return b[idx+4]; }
-	double& dIh2( vector<double>& b)	{ return b[idx+5]; }
-
-
-	CNeuronEC_d( const char *inlabel,
-		     double x, double y, double z,
-		     CModel *inM, int s_mask = 0,
-		     bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron (NT_EC_D, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-
-
-
-class CNeuronECA_d
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-	double      m( vector<double>& b) const	{ return b[idx+1]; }
-	double      h( vector<double>& b) const	{ return b[idx+2]; }
-	double      n( vector<double>& b) const	{ return b[idx+3]; }
-	double   mNap( vector<double>& b) const	{ return b[idx+4]; }
-	double    Ih1( vector<double>& b) const	{ return b[idx+5]; }
-	double    Ih2( vector<double>& b) const	{ return b[idx+6]; }
-
-	double&    dm( vector<double>& b)	{ return b[idx+1]; }
-	double&    dh( vector<double>& b)	{ return b[idx+2]; }
-	double&    dn( vector<double>& b)	{ return b[idx+3]; }
-	double& dmNap( vector<double>& b)	{ return b[idx+4]; }
-	double&  dIh1( vector<double>& b)	{ return b[idx+5]; }
-	double&  dIh2( vector<double>& b)	{ return b[idx+6]; }
-
-	CNeuronECA_d( const char *inlabel,
-		      double x, double y, double z,
-		      CModel *inM, int s_mask = 0,
-		      bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron( NT_ECA_D, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	void derivative( vector<double>&, vector<double>&);
-};
-
-//#endif  // CN_WANT_MORE_NEURONS
-
-
-
-
-
-
-
-
-
-
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-class COscillatorColpitts
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-	double   x0( vector<double>& b) const	{ return b[idx+0]; }  // there's no E() for this one
-	double   x1( vector<double>& b) const	{ return b[idx+1]; }
-	double   x2( vector<double>& b) const	{ return b[idx+2]; }
-	double& dx0( vector<double>& b)		{ return b[idx+0]; }
-	double& dx1( vector<double>& b)		{ return b[idx+1]; }
-	double& dx2( vector<double>& b)		{ return b[idx+2]; }
-
-	COscillatorColpitts( const char *inlabel,
-			     double x, double y, double z,
-			     CModel *inM, int s_mask = 0,
-			     bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron (NT_COLPITTS, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	virtual void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-
-
-
-
-/*
-// does not work
-
-class COscillatorLV
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-	double   fr( vector<double>& b) const	{ return b[idx+1]; }
-	double& dfr( vector<double>& b)		{ return b[idx+1]; }
-
-	COscillatorLV( const char *inlabel,
-		       double x, double y, double z,
-		       CModel *inM, int s_mask = 0,
-		       bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron( NT_LV, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	enum TParametersOscilLV {
-		rho
-	};
-	void derivative( vector<double>& x, vector<double>& dx)
-		{
-			dE(dx) = fr(x) * (1.0 - P[rho] * fr(x)) - Isyn(x);
-		}
-};
-
-
-*/
-
-
-
-
-class COscillatorVdPol
-  : public C_HostedConductanceBasedNeuron {
-
-     public:
-	double   amp( vector<double>& b) const	{ return b[idx+0]; }
-	double    _x( vector<double>& b) const	{ return b[idx+1]; }
-	double& damp( vector<double>& b)	{ return b[idx+0]; }
-	double&  d_x( vector<double>& b)	{ return b[idx+1]; }
-
-	COscillatorVdPol( const char *inlabel,
-			  double x, double y, double z,
-			  CModel *inM, int s_mask = 0,
-			  bool do_allocations_immediately = true)
-	      : C_HostedConductanceBasedNeuron (NT_VDPOL, inlabel, x, y, z,
-						inM, s_mask, do_allocations_immediately)
-		{}
-
-	enum TParametersOscilVdPol {
-		eta, omega2
-	};
-	void derivative( vector<double> &x, vector<double> &dx)
-		{
-			damp(dx) = _x(x);
-			d_x(dx) = (P[eta] - gsl_pow_2( amp(x))) * _x(x) - P[omega2] * amp(x) + Isyn(x);
-		}
-};
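-
-// This is a Van der Pol oscillator written as a first-order system,
-//	A' = v,  v' = (eta - A^2) v - omegasq A + Isyn,
-// eta controlling the nonlinear damping and omegasq the squared natural
-// frequency.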
-
-
-//#endif  // CN_WANT_MORE_NEURONS
-
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/hosted-synapses.cc b/upstream/src/libcn/hosted-synapses.cc
deleted file mode 100644
index cda2029..0000000
--- a/upstream/src/libcn/hosted-synapses.cc
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-04-03
- *
- */
-
-
-#include <iostream>
-
-#include "hosted-synapses.hh"
-#include "param-unit-literals.hh"
-
-#include "types.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-using namespace std;
-
-
-// the base synapse here
-cnrun::C_HostedSynapse::
-C_HostedSynapse( TUnitType intype,
-		 C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		 double ing, CModel *inM, int s_mask,
-		 bool do_allocations_immediately)
-      : C_BaseSynapse( intype, insource, intarget, ing, inM, s_mask),
-	C_HostedAttributes()
-{
-	if ( M )
-		M->include_unit( this, do_allocations_immediately);
-	else
-		idx = (unsigned long)-1;
-}
-
-
-
-cnrun::C_HostedSynapse::
-~C_HostedSynapse()
-{
-	if ( __cn_verbosely > 5 )
-		fprintf( stderr, " deleting hosted synapse \"%s\"\n", _label);
-}
-
-
-
-
-
-
-// -- parameters
-
-const char* const cnrun::__CN_ParamNames_SynapseAB_dd[] = {
-//	"Synaptic strength g, " __CN_PU_CONDUCTANCE,
-	"Reversal potential Esyn, " __CN_PU_POTENTIAL,
-	"Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
-	"Rise rate \316\261, " __CN_PU_RATE,
-	"Decay rate \316\262, " __CN_PU_RATE,
-	"Time of transmitter release, " __CN_PU_TIME,
-//	"Noise level \317\203",
-};
-const char* const cnrun::__CN_ParamSyms_SynapseAB_dd[] = {
-//	"gsyn",
-	"Esyn",
-	"Epre",
-	"alpha",
-	"beta",
-	"trel",
-//	"sigma",
-};
-
-const double cnrun::__CN_Params_SynapseAB_dd[] = {
-//	0.12,
-	0,
-      -20,
-	0.5,
-	0.05,
-	5.0,
-//	0.
-};
-
-const double cnrun::__CN_Params_SynapseABMinus_dd[] = {
-//	0.12,
-	0,
-      -20,
-	0.27785150819749,
-	0.05,
-	5.0,
-//	0.
-};
-
-const double cnrun::__CN_Params_SynapseMxAB_dd[] = {
-//	0.12,
-	0,
-      -20,
-	0.27785150819749,  // the only parameter differing from its AB namesake;
-			   // by design, the same value as in the ABMinus variation
-	0.05,
-	5.0,
-//	0.
-};
-
-
-const char* const cnrun::__CN_ParamNames_SynapseAB_dr[] = {
-//	"Synaptic strength g, " __CN_PU_CONDUCTANCE,
-	"Assumed (target->E - Esyn), " __CN_PU_POTENTIAL,
-	"Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
-	"Rise rate \316\261, " __CN_PU_RATE,
-	"Decay rate \316\262, " __CN_PU_RATE,
-	"Time of transmitter release, " __CN_PU_TIME,
-//	"Noise level \317\203",
-};
-const char* const cnrun::__CN_ParamSyms_SynapseAB_dr[] = {
-//	"gsyn",
-	"Ediff",
-	"Epre",
-	"alpha",
-	"beta",
-	"trel",
-//	"sigma",
-};
-
-
-const double cnrun::__CN_Params_SynapseMxAB_dr[] = {
-//	0.12,
-      -60 - 0,  // Ediff: a reasonable Esyn - target->E, the latter being -60 mV at rest
-      -20,
-	0.27785150819749,
-	0.05,
-	5.0,
-//	0.
-};
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_SynapseAB_rr[] = {
-//	"Synaptic strength g, " __CN_PU_CONDUCTANCE,
-	"Assumed (target->E - Esyn), " __CN_PU_VOLTAGE,
-	"Rise rate \316\261, " __CN_PU_RATE,
-	"Decay rate \316\262, " __CN_PU_RATE,
-	"Refractory period T, " __CN_PU_TIME,
-//	"Noise level \317\203",
-};
-const char* const cnrun::__CN_ParamSyms_SynapseAB_rr[] = {
-//	"gsyn",
-	"Ediff",
-	"alpha",
-	"beta",
-	"T",
-//	"sigma",
-};
-const double cnrun::__CN_Params_SynapseAB_rr[] = {
-//	0.12,
-      -60 - 0,
-	0.27785150819749,
-	0.05,
-	5,
-//	0.
-};
-
-
-
-const char* const cnrun::__CN_ParamNames_SynapseRall_dd[] = {
-//	"Synaptic strength g, " __CN_PU_CONDUCTANCE,
-	"Reversal potential, " __CN_PU_POTENTIAL,
-	"Presynaptic threshold potential, " __CN_PU_POTENTIAL,
-	"\317\204, " __CN_PU_RATE,
-//	"Noise level \317\203",
-};
-const char* const cnrun::__CN_ParamSyms_SynapseRall_dd[] = {
-//	"gsyn",
-	"Esyn",
-	"Epre",
-	"tau",
-//	"sigma",
-};
-const double cnrun::__CN_Params_SynapseRall_dd[] = {
-//	0.12,
-	0,
-      -20,
-	2,
-//	0.
-};
-
-
-
-
-// -- variables
-
-const char* const cnrun::__CN_VarNames_SynapseAB[] = {
-	"Amount of neurotransmitter released S"
-};
-const char* const cnrun::__CN_VarSyms_SynapseAB[] = {
-	"S"
-};
-const double cnrun::__CN_Vars_SynapseAB[] = {
-	0.
-};
-
-
-const char* const cnrun::__CN_VarNames_SynapseRall[] = {
-	"Amount of neurotransmitter released S",
-	"Amount of neurotransmitter absorbed R",
-};
-const char* const cnrun::__CN_VarSyms_SynapseRall[] = {
-	"S",
-	"R",
-};
-const double cnrun::__CN_Vars_SynapseRall[] = {
-	0.,
-	0.
-};
-
-
-
-
-
-
-
-void
-cnrun::CSynapseAB_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	if ( x[0] - t_last_release_started <= P[_rtime_] ) {
-	      // continue release from an old spike
-		dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
-	} else
-		if ( _source->E(x) > P[_Epre_] ) {
-		      // new spike ... start releasing
-			t_last_release_started = x[0];
-			dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
-		} else {
-		      // no release
-			dS(dx) = -P[_beta_] * S(x);
-		}
-}
-
-
-
-
-void
-cnrun::CSynapseABMinus_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	if ( x[0] - t_last_release_started <= P[_rtime_] ) {
-	      // continue release from an old spike
-		dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
-	} else
-		if ( _source->E(x) > P[_Epre_] ) {
-		      // new spike ... start releasing
-			t_last_release_started = x[0];
-			dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
-		} else {
-		      // no release
-			dS(dx) = -P[_beta_] * S(x);
-		}
-}
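-
-// The only difference from CSynapseAB_dd::derivative() above is the release
-// term: alpha * 1 here vs alpha * (1 - S) there, i.e. the "Minus" variant
-// releases at a constant rate, without saturation of S.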
-
-
-
-
-// -------- Multiplexing AB
-
-void
-cnrun::CSynapseMxAB_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-//	printf( "%s %lu %d %g\n", _source->label, _source->serial_id, _source->idx, _source->E(x));
-
-	if ( q() > 0 ) {
-		unsigned effective_q = q();
-	      // as we nudge along a little within RK's operational
-	      // dt, some spikes can expire in that brief while:
-	      // decrement q then, just for this while
-		while ( effective_q  &&  M->model_time(x) - _kq[q()-effective_q] > P[_rtime_] )
-			--effective_q;
-#ifdef __CN_MORECODE__
-		if ( effective_q < q() && M->verbosely > 6 )
-			printf( "YMxAB %s smacks %u spike(s) of %u at %g(+%g)\n", label,
-				(unsigned)q() - effective_q, (unsigned)q(),
-				M->model_time(),
-				M->model_time(x) - M->model_time());
-#endif
-		dS(dx) = P[_alpha_] * effective_q - P[_beta_] * S(x);
-	} else
-	      // no release, decay
-		dS(dx) = -P[_beta_] * S(x);
-}
-
-
-
-void
-cnrun::CSynapseMxAB_dd::
-update_queue()
-{
-	unsigned k = _source -> n_spikes_in_last_dt();
-	while ( k-- )
-		_kq.push_back( model_time());
-
-      // see if the oldest spike has gone past the synapse release time;
-      // disregard spike duration, and measure time from the saved spike_start
-      // (which is == spike_end)
-	while ( true ) {
-		if ( q() > 0 && model_time() - _kq.front() > P[_rtime_] )
-			_kq.erase( _kq.begin());
-		else
-			break;
-//		cout << "q--\n";
-	}
-}
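-
-// _kq thus keeps one timestamp per spike still within its release window;
-// q(), the queue length, is the number of concurrently releasing spikes,
-// by which derivative() above scales the alpha term.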
-
-
-
-
-
-
-
-
-
-
-void
-cnrun::CSynapseAB_rr::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	// if ( source()->F(x) > 0 )
-	// 	printf( "%s->F(x) = %g\n", _source->label, source()->F(x));
-	dS(dx) = -P[_beta_] * S(x)
-		+ P[_alpha_] * _numerator / (exp( P[_beta_] / source()->F(x)) + 1);
-}
-
-
-
-
-
-
-
-inline int Heaviside( double val)  { return (val >= 0) ? 1 : 0; }
-
-void
-cnrun::CSynapseRall_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-	dR(dx) = 1 / P[_tau_] * (-R(x) + Heaviside( _source->E(x) - P[_Epre_]));
-	dS(dx) = 1 / P[_tau_] * (-S(x) + R(x));
-}
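-
-// A two-stage cascade: R relaxes toward the presynaptic Heaviside term with
-// time constant tau, and S in turn low-pass filters R with the same tau,
-// yielding a smooth, alpha-function-like synaptic response.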
-
-
-
-// eof
diff --git a/upstream/src/libcn/hosted-synapses.hh b/upstream/src/libcn/hosted-synapses.hh
deleted file mode 100644
index f709133..0000000
--- a/upstream/src/libcn/hosted-synapses.hh
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-04-01
- *
- * Synapse units: alpha-beta
- */
-
-
-#ifndef LIBCN_HOSTED_SYNAPSES_H
-#define LIBCN_HOSTED_SYNAPSES_H
-
-#include <vector>
-#include <queue>
-#include <cfloat>
-
-#include "base-synapse.hh"
-#include "hosted-attr.hh"
-#include "mx-attr.hh"
-#include "hosted-neurons.hh"
-#include "standalone-neurons.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-namespace cnrun {
-
-class C_HostedSynapse
-  : public C_BaseSynapse, public C_HostedAttributes {
-
-    private:
-	C_HostedSynapse();
-    protected:
-	C_HostedSynapse( TUnitType intype,
-			 C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			 double ing, CModel*, int s_mask = 0,
-			 bool do_allocations_immediately = true);
-    public:
-       ~C_HostedSynapse();
-
-	void reset_vars();
-	double &var_value( size_t);
-	const double &get_var_value( size_t) const;
-
-	double  S() const; // needs access to parent model var vector, defined in model.h
-	double  S( vector<double> &b) const	{ return b[idx+0]; }
-	double& dS( vector<double> &b) const	{ return b[idx+0]; }
-};
-
-
-
-
-// Note on synapse classification by whether the source/target is a tonic
-// (rate) or phasic (discrete) unit:
-//
-// * Where a synapse connects _to_ a Rate neuron, it will have Ediff
-//   in lieu of Esyn and compute Isyn accordingly, otherwise inheriting
-//   its parameters.
-//
-// * Where a synapse connects _from_ a Rate unit, its derivative
-//   method follows a completely different equation.  It now has a
-//   different set of parameters, too.
-
-// The _xy suffix in a class name encodes x = source and y = target, with
-// `d' for discrete, `r' for rate.
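-//
-// For example, CSynapseAB_dd below connects a discrete source to a discrete
-// target, while CSynapseMxAB_dr feeds spikes from a discrete source into a
-// rate-based target.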
-
-
-// The `multiplexing' part concerns the source of the synapse: the machinery
-// collects and multiplexes more than a single spike per dt.
-//
-// * The source is a specialized (`dot'), and inherently phasic, unit.
-// * All parameters are inherited from the base class.
-
-
-// Alpha-Beta family
-
-class CSynapseAB_dd
-  : public C_HostedSynapse {
-
-    public:
-	CSynapseAB_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		       double ing, CModel *inM, int s_mask = 0,
-		       bool do_allocations_immediately = true,
-		       TUnitType alt_type = YT_AB_DD)
-	      : C_HostedSynapse( alt_type, insource, intarget,
-				 ing, inM, s_mask, do_allocations_immediately)
-		{}
-
-	enum {
-		_Esyn_, _Epre_, _alpha_, _beta_, _rtime_
-	};
-
-	double Isyn( const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
-		{
-			return -g * S() * (with_neuron.E() - P[_Esyn_]);
-//			return -P[_gsyn_] * S() * (_target->E() - P[_Esyn_]);
-		}
-	double Isyn( vector<double>& b, const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
-		{
-			return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
-//			return -P[_gsyn_] * S(b) * (_target->E(b) - P[_Esyn_]);
-		}
-
-	void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
-};
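-
-// Isyn above implements the usual conductance-based postsynaptic current,
-//	Isyn = -g S (E_post - Esyn),
-// so a spike-driven rise in S pulls the postsynaptic potential toward the
-// reversal potential Esyn.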
-
-
-class CNeuronHHRate;
-
-// TODO
-class CSynapseAB_dr;
-class CSynapseAB_rd;
-
-
-class CSynapseAB_rr
-  : public C_HostedSynapse {
-
-    public:
-	CSynapseAB_rr( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		       double ing, CModel *inM, int s_mask = 0,
-		       bool do_allocations_immediately = true,
-		       TUnitType alt_type = YT_AB_RR)
-	      : C_HostedSynapse( alt_type, insource, intarget,
-				 ing, inM, s_mask, do_allocations_immediately)
-		{}
-
-	enum {
-		_Ediff_, _alpha_, _beta_, _T_, _sigma_
-	};
-
-      // supply own Isyn to avoid referencing target->E
-	double Isyn( const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S() * P[_Ediff_];
-		}
-	double Isyn( vector<double>& x, const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S(x) * P[_Ediff_];
-		}
-
-	void derivative( vector<double>&, vector<double>&);
-
-	void param_changed_hook()
-		{
-			_numerator = exp( P[_beta_] * P[_T_]) + 1;
-		}
-    private:
-	double	_numerator;
-};
-
-
-
-
-
-
-
-
-class CSynapseMxAB_dd
-  : public CSynapseAB_dd, public C_MultiplexingAttributes {
-
-    public:
-	CSynapseMxAB_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			 double ing, CModel *inM, int s_mask = 0,
-			 bool do_allocations_immediately = true,
-			 TUnitType alt_type = YT_MXAB_DD)
-	      : CSynapseAB_dd( insource, intarget,
-			       ing, inM, s_mask, do_allocations_immediately,
-			       alt_type)
-		{}
-
-	void reset_state()
-		{
-			C_HostedSynapse::reset_state();
-			C_MultiplexingAttributes::reset();
-		}
-
-      // because an Mx* synapse's source is always a standalone, non-integratable
-      // neuron, which doesn't propagate its vars onto M->V, we shadow S(x) to make
-      // the actual S value available from within the integrator
-	double S() const			{ return C_HostedSynapse::S(); }
-	double S( vector<double> &unused) const	{ return C_HostedSynapse::S(); }
-
-	void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
-
-    private:
-	friend class CModel;
-	void update_queue();
-};
-
-
-
-
-
-class CSynapseMxAB_dr
-  : public CSynapseMxAB_dd {
-
-    public:
-	CSynapseMxAB_dr( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			 double ing, CModel *inM, int s_mask = 0,
-			 bool do_allocations_immediately = true)
-	      : CSynapseMxAB_dd( insource, intarget,
-				 ing, inM, s_mask, do_allocations_immediately,
-				 YT_MXAB_DR)
-		{}
-
-	enum { _Ediff_, /* ... */ };
-	double Isyn( const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S() * P[_Ediff_];
-		}
-	double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S() * P[_Ediff_];
-		}
-};
-
-
-
-
-
-
-
-
-
-
-
-
-class CSynapseABMinus_dd
-  : public CSynapseAB_dd {
-
-    public:
-	CSynapseABMinus_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			    double ing, CModel *inM, int s_mask = 0,
-			    bool do_allocations_immediately = true)
-	      : CSynapseAB_dd( insource, intarget,
-			       ing, inM, s_mask, do_allocations_immediately,
-			       YT_ABMINUS_DD)
-		{}
-
-	enum {
-		_Esyn_, _Epre_, _alpha_, _beta_, _rtime_, _sigma_
-	};
-
-	void derivative( vector<double>&, vector<double>&);
-};
-
-
-// TODO
-class CSynapseABMinus_dr;
-class CSynapseABMinus_rd;
-class CSynapseABMinus_rr;
-
-
-
-
-// Rall
-
-class CSynapseRall_dd
-  : public C_HostedSynapse {
-
-    public:
-	CSynapseRall_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			 double ing, CModel *inM, int s_mask = 0,
-			 bool do_allocations_immediately = true)
-	      : C_HostedSynapse( YT_RALL_DD, insource, intarget,
-				 ing, inM, s_mask, do_allocations_immediately)
-		{}
-
-	double&  R( vector<double>& b)	{ return b[idx+1]; }
-	double& dR( vector<double>& b)	{ return b[idx+1]; }
-
-	enum {
-		_Esyn_, _Epre_, _tau_, _sigma_
-	};
-
-	double Isyn( const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S() * (with_neuron.E() - P[_Esyn_]);
-		}
-	double Isyn( vector<double>&b, const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
-		}
-
-	void derivative( vector<double>&, vector<double>&);
-};
-
-// TODO
-class CSynapseRall_rd;
-class CSynapseRall_dr;
-class CSynapseRall_rr;
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/integrate-base.hh b/upstream/src/libcn/integrate-base.hh
deleted file mode 100644
index 558a180..0000000
--- a/upstream/src/libcn/integrate-base.hh
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-23
- *
- * A base class for integrators, to be plugged into CModel
- */
-
-
-#ifndef LIBCN_INTEGRATE_BASE_H
-#define LIBCN_INTEGRATE_BASE_H
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-namespace cnrun {
-
-class CModel;
-
-class CIntegrate_base {
-
-//    friend class CModel;
-//    protected:
-    public:
-	double	_dt_min, _dt_max,
-		_eps, _eps_abs, _eps_rel,
-		dt;  // that which is current
-
-	bool	is_owned;
-
-	CModel *model;
-
-	CIntegrate_base( double dt_min, double dt_max,
-			 double eps,  double eps_abs, double eps_rel,
-			 bool inis_owned)
-	      : _dt_min (dt_min), _dt_max (dt_max),
-		_eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
-		dt (dt_min),
-		is_owned (inis_owned)
-		{}
-	virtual ~CIntegrate_base()
-		{}
-
-	virtual void cycle() = 0;
-	virtual void fixate() = 0;
-	virtual void prepare() = 0;
-};
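-
-// For reference, the simplest CIntegrate_base descendant, a fixed-step
-// forward-Euler scheme, might look as follows (an illustrative sketch only,
-// relying on the same CModel members -- V, W, _var_cnt and the hosted-unit
-// iteration macros -- that CIntegrateRK65 uses in model-cycle.cc):
-//
-//	class CIntegrateEuler : public CIntegrate_base {
-//	    public:
-//		using CIntegrate_base::CIntegrate_base;
-//		void prepare()	{ F.resize( model->_var_cnt); }
-//		void cycle()	// W := V + dt * f(V)
-//			{
-//				F.assign( model->_var_cnt, 0.);
-//				F[0] = 1.;  // dV[0]/dt == 1, V[0] being model time
-//				for_model_hosted_neurons (model,N)
-//					(*N) -> derivative( model->V, F);
-//				for_model_hosted_synapses (model,S)
-//					(*S) -> derivative( model->V, F);
-//				for ( size_t k = 0; k < model->_var_cnt; ++k )
-//					model->W[k] = model->V[k] + dt * F[k];
-//			}
-//		void fixate()	{ model->V = model->W; }
-//	    private:
-//		std::vector<double> F;
-//	};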
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/integrate-rk65.hh b/upstream/src/libcn/integrate-rk65.hh
deleted file mode 100644
index ee37e74..0000000
--- a/upstream/src/libcn/integrate-rk65.hh
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-23
- *
- * A Runge-Kutta 6-5 integrator
- */
-
-
-#ifndef LIBCN_INTEGRATE_RK65_H
-#define LIBCN_INTEGRATE_RK65_H
-
-#include <vector>
-#include "integrate-base.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-namespace cnrun {
-
-class CIntegrateRK65
-  : public CIntegrate_base {
-
-    public:
-	double	_dt_max_cap;
-
-	CIntegrateRK65( double dt_min = 1e-6, double dt_max = .5, double dt_max_cap = 5,
-			double eps = 1e-8,  double eps_abs = 1e-12, double eps_rel = 1e-6,
-			bool inis_owned = true)
-	      : CIntegrate_base (dt_min, dt_max, eps, eps_abs, eps_rel, inis_owned),
-		_dt_max_cap (dt_max_cap)
-		{}
-
-	void cycle() __attribute__ ((hot));
-	void fixate() __attribute__ ((hot));
-	void prepare();
-
-    private:
-	std::vector<double> Y[9], F[9], y5;
-};
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/model-cycle.cc b/upstream/src/libcn/model-cycle.cc
deleted file mode 100644
index e5de3d9..0000000
--- a/upstream/src/libcn/model-cycle.cc
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- *
- * CModel top cycle
- */
-
-
-#include <iostream>
-#include <csignal>
-#include <ctime>
-#include <cstdlib>
-#include <cfloat>
-
-#include "libstilton/lang.hh"
-
-#include "integrate-rk65.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-
-/*--------------------------------------------------------------------------
-  Implementation of a 6-5 Runge-Kutta method with adaptive time step
-  mostly taken from the book "The numerical analysis of ordinary differential
-  equations - Runge-Kutta and general linear methods" by J.C. Butcher, Wiley,
-  Chichester, 1987 and a free adaptation to a 6 order Runge Kutta method
-  of an ODE system with additive white noise
---------------------------------------------------------------------------*/
-
-inline namespace {
-
-double __Butchers_a[9][8] = {
-	{ },
-	{ 1./9 },
-	{ .5/9,	.5/9 },
-	{ 0.416666666666667e-1,	0., 0.125 },
-	{ 1./6, 0., -0.5, 2./3 },
-	{ 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
-	{ -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
-	{ 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
-	{ 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
-};
-
-
-double __Butchers_b[9] = {
-	0.567119155354449e-1,
-	0.,
-	0.,
-	0.210909572355356,
-	0.141490384615385,
-	0.202051282051282,
-	0.253186813186813,
-	0.843679809736684e-1,
-	0.512820512820513e-1
-};
-} // inline namespace
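-
-// With these coefficients, stage i of the scheme is
-//	Y_i = V + dt * sum_{j<i} a[i][j] * F_j,		F_i = f(Y_i),
-// the 5th-order solution being assembled from row a[8][*] and the 6th-order
-// one from b[*]; cycle() below uses their difference as the local error
-// estimate for step-size control.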
-
-
-
-void
-cnrun::CIntegrateRK65::
-prepare()
-{
-	for ( unsigned short i = 0; i < 9; ++i )
-		Y[i].resize( model->_var_cnt), F[i].resize( model->_var_cnt);
-	y5.resize( model->_var_cnt);
-
-	if ( model->standalone_unit_cnt() > 0 )
-		if ( _dt_max > model->_discrete_dt ) {
-			_dt_max = model->_discrete_dt;
-			if ( model->verbosely > 1 )
-				cout << "CIntegrateRK65: Set dt_max to model->discrete_dt: " << _dt_max << endl;
-		}
-}
-
-
-void
-__attribute__ ((hot))
-cnrun::CIntegrateRK65::
-cycle()
-{
-      // omp stuff found inapplicable due to considerable overhead in sys time
-      // (thread creation)
-	unsigned int i, j, k;
-
-	double	aF;
-
-      // calculate iterative terms rk65_Y[__i] and rk65_F[__i] (to sixth order)
-	for ( i = 0; i < 9; ++i ) {
-//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) firstprivate(aF,j,i)
-		for ( k = 0; k < model->_var_cnt; ++k ) {
-			aF = 0.0;
-			for ( j = 0; j < i; ++j )
-				aF += __Butchers_a[i][j] * F[j][k];
-			Y[i][k] = model->V[k] + dt * aF;
-		}
-	      // see to this vector's dt
-		F[i][0] = 1.;
-
-//#pragma omp consider...
-		for_model_hosted_neurons (model,N)
-			(*N) -> derivative( Y[i], F[i]);
-		for_model_hosted_synapses (model,S)
-			(*S) -> derivative( Y[i], F[i]);
-	}
-
-      // sum up Y[i] and F[i] to build 5th order scheme -> y5
-//#pragma omp parallel for private(aF,j)
-	for ( k = 0; k < model->_var_cnt; ++k ) {
-		aF = 0.0;
-		for ( j = 0; j < 8; ++j )
-			aF += __Butchers_a[8][j] * F[j][k];
-		y5[k] = model->V[k] + dt * aF;
-	}
-
-      // sum up Y[i] and F[i] to build 6th order scheme -> W
-//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) private(aF,j)
-	for ( k = 0; k < model->_var_cnt; ++k ) {
-		aF = 0.0;
-		for ( j = 0; j < 9; ++j )
-			aF += __Butchers_b[j] * F[j][k];
-		model->W[k] = model->V[k] + dt * aF;
-	}
-
-      // abrupt changes (kinks) in synaptic variables can send dt rocketing, so cap its growth
-	double	dtx = min( _dt_max, dt * _dt_max_cap);
-
-      // determine minimal necessary new dt to get error < eps based on the
-      // difference between results in y5 and W
-	double try_eps, delta, try_dt;
-      // exclude time (at index 0)
-//#pragma omp parallel for private(try_eps,delta,try_dt)
-	for ( k = 1; k < model->_var_cnt; ++k ) {
-		try_eps = max (_eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
-		delta = abs (model->W[k] - y5[k]);
-		if ( delta > DBL_EPSILON * y5[k] ) {
-			try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
-			if ( try_dt < dtx )
-				dtx = try_dt;
-		}
-	}
-      // make sure we don't grind to a halt
-	if ( dtx < _dt_min )
-		dtx = _dt_min;
-
-      // set the new step
-	dt = dtx;
-}
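-
-// The step-size rule above follows from the local error of a 6(5) pair
-// scaling as dt^6: requiring |W - y5| <= eps gives
-//	dt_new = dt * (eps / delta)^(1/6),
-// which is the exp( (log(try_eps) - log(delta)) / 6) * dt computed in the
-// loop, clamped to [_dt_min, min( _dt_max, dt * _dt_max_cap)].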
-
-
-
-
-
-
-
-
-// -------------- CModel::advance and dependents
-
-volatile sig_atomic_t chris_at_kbd;
-void
-ctrl_c_handler( int signum)
-{
-	chris_at_kbd = true;
-}
-
-
-unsigned int
-cnrun::CModel::
-advance( double dist, double *cpu_time_used_p)
-{
-	chris_at_kbd = 0;
-	signal( SIGINT, ctrl_c_handler);
-
-	if ( unit_list.size() == 0 ) {
-		fprintf( stderr, "Model is empty\n");
-		return 0;
-	}
-	if ( _status & CN_MDL_NOTREADY )
-		prepare_advance();
-
-	bool	have_hosted_units = (hosted_unit_cnt() > 0),
-		have_standalone_units = (standalone_unit_cnt() > 0),
-		have_ddtbound_units = (ddtbound_unit_cnt() > 0);
-
-	unsigned int retval;
-	if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
-		retval = _do_advance_on_pure_hosted( dist, cpu_time_used_p);
-	else if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
-		retval = _do_advance_on_pure_standalone( dist, cpu_time_used_p);
-	else if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
-		retval = _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
-	else
-		retval = _do_advance_on_mixed( dist, cpu_time_used_p);
-
-	signal( SIGINT, SIG_IGN);  // uninstall the Ctrl-C handler on all return paths
-	return retval;
-}
-
-void
-__attribute__ ((hot))
-cnrun::CModel::
-_setup_schedulers()
-{
-	regular_periods.clear();
-	regular_periods_last_checked.clear();
-	if ( units_with_periodic_sources.size() > 0 ) { // determine period(s) at which to wake up reader update loop
-		for_all_units_with_periodic_sources (U)
-			for ( auto S = (*U) -> sources.begin(); S != (*U)->sources.end(); ++S )
-				regular_periods.push_back( (reinterpret_cast<CSourcePeriodic*>(S->source)) -> period);
-		regular_periods.sort();
-		regular_periods.unique();  // list::unique only collapses adjacent duplicates, hence sort first
-		regular_periods_last_checked.resize( regular_periods.size());
-	}
-
-	if ( verbosely > 2 && regular_periods.size() > 0 ) {
-		printf( "%zd timepoint(s) in scheduler_update_periods: ", regular_periods.size());
-		auto I = regular_periods.begin();
-		for ( size_t i = 0; i < regular_periods.size()-1; ++i, ++I )
-			printf( "%g, ", *I);
-		printf( "%g\n\n", regular_periods.back());
-	}
-
-      // ensure all schedulers are effective at the beginning, too
-	for_all_units_with_periodic_sources (U)
-		(*U) -> apprise_from_sources();
-}
-
-
-void
-cnrun::CModel::
-prepare_advance()
-{
-	if ( _status & CN_MDL_LOGDT && !_dt_logger ) {
-		string	fname = name + ".dt";
-		_dt_logger = new ofstream( fname.data());
-	}
-	if ( _status & CN_MDL_LOGSPIKERS && !_spike_logger ) {
-		string	fname = name + ".spikes";
-		_spike_logger = new ofstream( fname.data());
-	}
-
-	_setup_schedulers();
-
-	if ( !hosted_unit_cnt() )
-		_integrator->dt = _discrete_dt;
-
-	if ( ddtbound_unit_cnt() )
-		_status |= CN_MDL_HAS_DDTB_UNITS;
-	else
-		_status &= ~CN_MDL_HAS_DDTB_UNITS;
-
-	_status &= ~CN_MDL_NOTREADY;
-
-	if ( verbosely > 5 )
-		fprintf( stderr, "Model prepared\n");
-}
-
-
-
-// comment concerning for_all_conscious_neurons loop:
-// these have no next_time_E or suchlike, have `fixate' implicit herein; also,
-// conscious neurons fire irrespective of whatever happens elsewhere in the model, and
-// they, logically, have no inputs
-
-#define _DO_ADVANCE_COMMON_INLOOP_BEGIN \
-	if ( chris_at_kbd ) {		\
-		printf( "\nInterrupted\n");	\
-		break;			\
-	}								\
-	for_all_units_with_contiuous_sources (U)			\
-		(*U)->apprise_from_sources();				\
-	{								\
-		auto I = regular_periods.begin(); \
-		auto Ic = regular_periods_last_checked.begin(); \
-		for ( ; I != regular_periods.end(); ++I, ++Ic ) \
-			if ( unlikely(model_time() >= *I * (*Ic + 1)) ) { \
-				(*Ic)++;				\
-				for_all_units_with_periodic_sources (U)	\
-					(*U)->apprise_from_sources();	\
-			}						\
-	}								\
-	for_all_conscious_neurons (N)					\
-	        (*N) -> possibly_fire();		       		\
-									\
-	for ( auto Yc = mx_syn_list.begin(); Yc != mx_syn_list.end(); ++Yc ) \
-		if ( (*Yc)->_source )					\
-			(*Yc) -> update_queue();
-
-
-#define _DO_ADVANCE_COMMON_INLOOP_MID \
-	if ( have_listeners ) {						\
-		if ( have_discrete_listen_dt ) {			\
-			if ( model_time() - last_made_listen >= listen_dt ) { \
-				for_all_listening_units (U)		\
-					(*U) -> tell();			\
-				last_made_listen += listen_dt;		\
-			}						\
-		} else							\
-			for_all_listening_units (U)			\
-				(*U) -> tell();				\
-	}								\
-	if ( unlikely (_status & CN_MDL_LOGDT) )			\
-		(*_dt_logger) << model_time() << "\t" << dt() << endl;	\
-									\
-	for_all_spikelogging_neurons (N) {				\
-		(*N) -> do_detect_spike_or_whatever();			\
-		if ( !(_status & CN_MDL_DISKLESS) &&			\
-		     (*N)->n_spikes_in_last_dt() &&			\
-		     _status & CN_MDL_LOGSPIKERS ) {			\
-			(*_spike_logger) << model_time() << "\t";	\
-			if ( _status & CN_MDL_LOGUSINGID )		\
-				(*_spike_logger) << (*N)->_serial_id << endl; \
-			else						\
-				(*_spike_logger) << (*N)->_label << endl; \
-		}							\
-	}
-
-
-#define _DO_ADVANCE_COMMON_INLOOP_END \
-	++_cycle;							\
-	++steps;							\
-	if ( verbosely != 0 ) {						\
-		if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
-			cpu_time_lastchecked = clock();			\
-			if ( _status & CN_MDL_DISPLAY_PROGRESS_PERCENT && !(_status & CN_MDL_DISPLAY_PROGRESS_TIME) ) \
-				fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
-					 (verbosely < 0) ? -(verbosely+1)*8 : 0, \
-					 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
-			else if ( _status & CN_MDL_DISPLAY_PROGRESS_TIME && !(_status & CN_MDL_DISPLAY_PROGRESS_PERCENT) ) \
-				fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
-					 (verbosely < 0) ? -(verbosely+1)*16 : 0, \
-					 model_time());			\
-			else if ( _status & CN_MDL_DISPLAY_PROGRESS_PERCENT && _status & CN_MDL_DISPLAY_PROGRESS_TIME ) \
-				fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
-					 (verbosely < 0) ? -(verbosely+1)*24 : 0, \
-					 model_time(),			\
-					 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
-			fflush( stderr);				\
-		}							\
-	}
-
-
-#define _DO_ADVANCE_COMMON_EPILOG \
-	cpu_time_ended = clock();					\
-	double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
-	if ( cpu_time_used_p )						\
-		*cpu_time_used_p = cpu_time_taken_seconds;		\
-	if ( verbosely > 0 || verbosely <= -1 ) {			\
-		fprintf( stderr, "\r\033[K");				\
-		fflush( stderr);					\
-	}								\
-	if ( verbosely > 0 )						\
-		printf( "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:" \
-			" avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n", \
-			model_time(), dist, steps, cpu_time_taken_seconds, \
-			model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_hosted( double dist, double *cpu_time_used_p)
-{
-	bool	have_listeners = (lisn_unit_list.size() > 0),
-		have_discrete_listen_dt = (listen_dt > 0.);
-
-	clock_t	cpu_time_started = clock(),
-		cpu_time_ended,
-		cpu_time_lastchecked = cpu_time_started;
-
-	double	time_started = model_time(),
-		time_ending = time_started + dist,
-		last_made_listen = time_started;
-
-	unsigned long steps = 0;
-	do {
-		_DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-		_integrator->cycle();
-
-		_DO_ADVANCE_COMMON_INLOOP_MID
-
-	      // fixate
-		_integrator->fixate();
-
-		_DO_ADVANCE_COMMON_INLOOP_END
-
-	      // model_time is advanced implicitly in _integrator->cycle()
-	} while ( model_time() < time_ending );
-
-	_DO_ADVANCE_COMMON_EPILOG
-
-	return steps;
-}
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_standalone( double dist, double *cpu_time_used_p)
-{
-	bool	have_listeners = (lisn_unit_list.size() > 0),
-		have_discrete_listen_dt = (listen_dt > 0.);
-
-	clock_t	cpu_time_started = clock(),
-		cpu_time_ended,
-		cpu_time_lastchecked = cpu_time_started;
-
-	double	time_started = model_time(),
-		time_ending = time_started + dist,
-		last_made_listen = time_started;
-
-	unsigned long steps = 0;
-	do {
-		_DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-	      // service simple units w/out any vars on the integration vector V
-		for_all_standalone_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> preadvance();
-		for_all_standalone_synapses (Y)
-			(*Y) -> preadvance();
-
-	      // even with n_hosted_{neurons,units} == 0, we would need _integrator->cycle() to advance V[0],
-	      // which is our model_time(); that is kind of expensive, so here's a shortcut
-		V[0] += _discrete_dt;
-		// _discrete_time += _discrete_dt;  // not necessary
-
-		_DO_ADVANCE_COMMON_INLOOP_MID
-
-	      // fixate
-		for_all_standalone_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> fixate();
-		for_all_standalone_synapses (Y)
-			(*Y) -> fixate();
-
-		_DO_ADVANCE_COMMON_INLOOP_END
-
-	} while ( model_time() < time_ending );
-
-	_DO_ADVANCE_COMMON_EPILOG
-
-	return steps;
-}
-
-
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_ddtbound( double dist, double *cpu_time_used_p)
-{
-	bool	have_listeners = (lisn_unit_list.size() > 0),
-		have_discrete_listen_dt = (listen_dt > 0.);
-
-	clock_t	cpu_time_started = clock(),
-		cpu_time_ended,
-		cpu_time_lastchecked = cpu_time_started;
-
-	double	time_started = model_time(),
-		time_ending = time_started + dist,
-		last_made_listen = time_started;
-
-	unsigned long steps = 0;
-	do {
-		_DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-	      // lastly, service units only serviceable at discrete dt
-		for_all_ddtbound_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> preadvance();
-		for_all_ddtbound_synapses (Y)
-			(*Y) -> preadvance();
-
-		V[0] += _discrete_dt;
-		_discrete_time += _discrete_dt;
-
-		_DO_ADVANCE_COMMON_INLOOP_MID
-
-	      // fixate
-		for_all_ddtbound_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> fixate();
-		for_all_ddtbound_synapses (Y)
-			(*Y) -> fixate();
-
-		_DO_ADVANCE_COMMON_INLOOP_END
-
-	} while ( model_time() < time_ending );
-
-	_DO_ADVANCE_COMMON_EPILOG
-
-	return steps;
-}
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_mixed( double dist, double *cpu_time_used_p)
-{
-	bool	have_hosted_units = (hosted_unit_cnt() > 0),
-		is_discrete_dt_bound = _status & CN_MDL_HAS_DDTB_UNITS,
-		have_listeners = (lisn_unit_list.size() > 0),
-		have_discrete_listen_dt = (listen_dt > 0.),
-		need_fixate_ddtbound_units;
-
-	clock_t	cpu_time_started = clock(),
-		cpu_time_ended,
-		cpu_time_lastchecked = cpu_time_started;
-
-	double	time_started = model_time(),
-		time_ending = time_started + dist,
-		last_made_listen = time_started;
-
-	unsigned long steps = 0;
-	do {
-		_DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-		_integrator->cycle();
-
-	      // service simple units w/out any vars on the integration vector V
-		for_all_standalone_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> preadvance();
-		for_all_standalone_synapses (Y)
-			(*Y) -> preadvance();
-
-	      // lastly, service units only serviceable at discrete dt
-		if ( is_discrete_dt_bound && model_time() >= _discrete_time ) {
-			for_all_ddtbound_neurons (N)
-				if ( !(*N)->is_conscious() )
-					(*N) -> preadvance();
-			for_all_ddtbound_synapses (Y)
-				(*Y) -> preadvance();
-
-			_discrete_time += _discrete_dt;
-			need_fixate_ddtbound_units = true;
-		} else
-			need_fixate_ddtbound_units = false;
-
-		if ( !have_hosted_units )
-			V[0] += _discrete_dt;
-
-
-		_DO_ADVANCE_COMMON_INLOOP_MID
-
-
-	      // fixate
-		_integrator->fixate();
-
-		for_all_standalone_neurons (N)
-			if ( !(*N)->is_conscious() )
-				(*N) -> fixate();
-		for_all_standalone_synapses (Y)
-			(*Y) -> fixate();
-
-		if ( need_fixate_ddtbound_units ) {
-			for_all_ddtbound_neurons (N)
-				if ( !(*N)->is_conscious() )
-					(*N) -> fixate();
-			for_all_ddtbound_synapses (Y)
-				(*Y) -> fixate();
-		}
-
-
-		_DO_ADVANCE_COMMON_INLOOP_END
-
-	} while ( model_time() < time_ending );
-
-	_DO_ADVANCE_COMMON_EPILOG
-
-	return steps;
-}
-
-
-// eof
diff --git a/upstream/src/libcn/model-nmlio.cc b/upstream/src/libcn/model-nmlio.cc
deleted file mode 100644
index 5006468..0000000
--- a/upstream/src/libcn/model-nmlio.cc
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-02
- *
- * NeuroML import/export methods for CModel
- */
-
-#include <string>
-#include <iostream>
-#include <regex.h>
-
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-#ifdef LIBXML_READER_ENABLED
-
-
-
-
-
-int
-cnrun::CModel::
-import_NetworkML( const char *fname, bool appending)
-{
-	LIBXML_TEST_VERSION;
-
-	xmlDoc *doc = xmlReadFile( fname, nullptr, 0);
-	if ( !doc )
-		return CN_NMLIN_NOFILE;
-
-	int retval = import_NetworkML( doc, fname, appending);
-
-	xmlFreeDoc( doc);
-
-	return retval;
-}
-
-
-
-
-
-inline namespace {
-
-xmlNode*
-find_named_root_child_elem( xmlNode *node,     // node to start search from
-			    const char *elem)  // name of the element searched for
-{
-	xmlNode *n;
-
-	for ( n = node->children; n; n = n->next ) {
-		if ( n->type == XML_ELEMENT_NODE ) {
-			if ( xmlStrEqual( n->name, BAD_CAST elem) )
-				return n;
-// the <populations> and <projections> nodes are expected to appear as
-// direct children of the root node; so don't go search deeper than that
-
-//			if ( n->children ) { // go search deeper
-//				ni = find_named_elem( n->children, elem);
-//				if ( ni )
-//					return ni;
-//			}
-		}
-	}
-	return nullptr;
-}
-
-}
-
-int
-cnrun::CModel::
-import_NetworkML( xmlDoc *doc, const char *fname, bool appending)
-{
-	int retval = 0;
-
-	// we pass up on validation (for which we would need to keep a
-	// .dtd or Schema at hand), and proceed to extracting elements
-
-	xmlNode *root_node = xmlDocGetRootElement( doc),
-		*n;
-
-      // read meta:notes and make out a name for the model
-	if ( !root_node ) {
-		fprintf( stderr, "Failed to obtain root element\n");
-		retval = CN_NMLIN_NOELEM;
-		goto out;
-	}
-
-      // give it a name: assume it's generated by neuroConstruct for now
-	if ( !appending ) {
-		reset();
-		if ( !(n = find_named_root_child_elem( root_node, "notes")) ) {
-			if ( verbosely > 1 )
-				fprintf( stderr, "<notes> element not found; model will be unnamed\n");
-			// this is not critical, so just keep the user
-			// informed and proceed
-		} else
-			if ( n->type == XML_ELEMENT_NODE ) {  // only concern ourselves with nodes of this type
-				xmlChar *notes_s = xmlNodeGetContent( n);
-				// look for a substring specific to neuroConstruct, which is obviously speculative
-				regex_t RE;
-				regcomp( &RE, ".*project: (\\w*).*", REG_EXTENDED);
-				regmatch_t M[1+1];
-				name = (0 == regexec( &RE, (char*)notes_s, 1+1, M, 0))
-					? string ((char*)notes_s + M[1].rm_so, M[1].rm_eo - M[1].rm_so)
-					: "(unnamed)";
-				regfree( &RE);  // release the compiled pattern
-				xmlFree( notes_s);
-			} else
-				name = "(unnamed)";
-	}
-
-	if ( verbosely > 0 )
-		printf( "Model \"%s\": %sing topology from %s\n",
-			name.c_str(), (appending ?"Append" :"Import"), fname);
-
-	// In the following calls to _process_{populations,instances}
-	// functions, the actual order of appearance of these nodes in
-	// the xml file doesn't matter, thanks to the xml contents
-	// being already wholly read and available to us as a tree.
-
-      // process <populations>
-	if ( !(n = find_named_root_child_elem( root_node, "populations")) ) {
-		retval = CN_NMLIN_NOELEM;
-		goto out;
-	} // assume there is only one <populations> element: don't loop to catch more
-	if ( (retval = _process_populations( n->children)) < 0)	// note n->children, which is in fact a pointer to the first child
-		goto out;
-
-      // process <projections>
-      // don't strictly require any projections as long as there are some neurons
-	if ( (n = find_named_root_child_elem( root_node, "projections")) ) {
-		if ( (retval = _process_projections( n->children)) < 0 )
-			goto out;
-	} else
-		if ( verbosely > 2 )
-			cout << "No projections found\n";
-
-out:
-	// we are done with topology; now put units' variables on a vector
-	finalize_additions();
-	// can call time_step only after finalize_additions
-
-	cout << endl;
-
-	return retval;
-}
-
-
-
-
-
-int
-cnrun::CModel::
-_process_populations( xmlNode *n)
-{
-	xmlChar *group_id_s = nullptr,
-		*cell_type_s = nullptr;
-
-	int	pop_cnt = 0;
-
-	try {
-		for ( ; n; n = n->next ) // if is nullptr (parent had no children), we won't do a single loop
-			if ( n->type == XML_ELEMENT_NODE && xmlStrEqual( n->name, BAD_CAST "population") ) {
-
-				group_id_s = xmlGetProp( n, BAD_CAST "name"); // BAD_CAST is just a cast to xmlChar*,
-									      // with the caveat that libxml functions
-									      // expect the strings pointed to to be valid UTF-8
-				if ( !group_id_s ) {
-					fprintf( stderr, "<population> element missing a \"name\" attribute near line %d\n", n->line);
-					return CN_NMLIN_BADATTR;
-				}
-			      // having an unnamed population probably isn't an error serious enough to abort
-			      // the operation, but discipline is above all
-
-				cell_type_s = xmlGetProp( n, BAD_CAST "cell_type");
-				// now we know the type of cells included in this population; remember it to pass on to
-				// _process_population_instances, where it is used to select an appropriate unit type
-				// when actually adding a neuron to the model
-
-			      // but well, let's check if we have units of that species in stock
-				if ( !unit_species_is_neuron((char*)cell_type_s) && !unit_family_is_neuron((char*)cell_type_s) ) {
-					fprintf( stderr, "Bad cell species or family (\"%s\") in population \"%s\"\n",
-						 (char*)cell_type_s, group_id_s);
-					throw CN_NMLIN_BADCELLTYPE;
-				}
-
-				xmlNode *nin = n->children;  // again, ->children means ->first
-				if ( nin )
-					for ( ; nin; nin = nin->next )  // deal with multiple <instances> nodes
-						if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "instances") ) {
-							int subretval = _process_population_instances( nin->children,
-												       group_id_s, cell_type_s);
-							if ( subretval < 0 )
-								throw subretval;
-
-							if ( verbosely > 2 )
-								printf( " %5d instance(s) of type \"%s\" in population \"%s\"\n",
-									subretval, cell_type_s,  group_id_s);
-							pop_cnt++;
-						}
-
-				xmlFree( cell_type_s), xmlFree( group_id_s);
-			}
-
-		if ( verbosely > 1 )
-			printf( "\tTotal %d population(s)\n", pop_cnt);
-
-	} catch (int ex) {
-		xmlFree( cell_type_s), xmlFree( group_id_s);
-
-		return ex;
-	}
-
-	return pop_cnt;
-}
-
-
-
-
-
-
-int
-cnrun::CModel::
-_process_projections( xmlNode *n)
-{
-	// much the same code as in _process_populations
-
-	xmlChar *prj_name_s = nullptr,
-		*prj_src_s = nullptr,
-		*prj_tgt_s = nullptr,
-		*synapse_type_s = nullptr;
-
-	size_t pop_cnt = 0;
-
-	try {
-		for ( ; n; n = n->next ) {
-			if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "projection") )
-				continue;
-
-			prj_name_s = xmlGetProp( n, BAD_CAST "name");
-			if ( !prj_name_s ) {
-				fprintf( stderr, "<projection> element missing a \"name\" attribute near line %u\n", n->line);
-				return CN_NMLIN_BADATTR;
-			}
-
-			prj_src_s  = xmlGetProp( n, BAD_CAST "source");
-			prj_tgt_s  = xmlGetProp( n, BAD_CAST "target");
-			if ( !prj_src_s || !prj_tgt_s ) {
-				fprintf( stderr, "Projection \"%s\" missing a \"source\" and/or \"target\" attribute near line %u\n",
-					 prj_name_s, n->line);
-				throw CN_NMLIN_BADATTR;
-			}
-
-			xmlNode *nin;
-			nin = n->children;
-			if ( !nin )
-				fprintf( stderr, "Empty <projection> node near line %d\n", n->line);
-
-			for ( ; nin; nin = nin->next )
-				if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "synapse_props") ) {
-					synapse_type_s = xmlGetProp( nin, BAD_CAST "synapse_type");
-					if ( !unit_species_is_synapse((char*)synapse_type_s) &&
-					     !unit_family_is_synapse((char*)synapse_type_s) ) {
-						fprintf( stderr, "Bad synapse type \"%s\" near line %u\n",
-							 (char*)synapse_type_s, nin->line);
-						throw CN_NMLIN_BADCELLTYPE;
-					}
-				}
-
-			for ( nin = n->children; nin; nin = nin->next )
-				if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "connections") ) {
-					int subretval = _process_projection_connections( nin->children,
-											 prj_name_s, synapse_type_s,
-											 prj_src_s, prj_tgt_s);
-					if ( subretval < 0 )
-						throw subretval;
-
-					if ( verbosely > 2 )
-						printf( " %5d connection(s) of type \"%s\" in projection \"%s\"\n",
-							subretval, synapse_type_s,  prj_name_s);
-					pop_cnt++;
-				}
-			xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s),
-				xmlFree( synapse_type_s);
-			synapse_type_s = nullptr;
-		}
-
-		if ( verbosely > 1 )
-			printf( "\tTotal %zd projection(s)\n", pop_cnt);
-
-	} catch (int ex) {
-		xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s), xmlFree( synapse_type_s);
-		return ex;
-	}
-
-	return (int)pop_cnt;
-}
-
-
-
-
-
-
-
-int
-cnrun::CModel::
-_process_population_instances( xmlNode *n, const xmlChar *group_prefix, const xmlChar *type_s)
-{
-	int	retval = 0;  // also keeps a count of added neurons
-
-	double	x, y, z;
-	char	cell_id[CN_MAX_LABEL_SIZE];
-
-	xmlNode *nin;
-
-	xmlChar *id_s = nullptr;
-	try {
-		for ( ; n; n = n->next ) {
-			if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "instance") )
-				continue;
-
-			id_s = xmlGetProp( n, BAD_CAST "id");  // assign, don't shadow, the id_s the catch handler frees
-			if ( !id_s ) {
-			      // could be less strict here and allow empty ids, which will then be composed
-			      // from group_prefix + id (say, "LN0", "LN1" and so on); but then, as
-			      // individual <projection>s would have to reference both endpoints by explicit
-			      // ids, it is obviously prone to error to have <instance> ids depend solely on
-			      // their order of appearance.
-			      // So we bark at empty ids.
-				fprintf( stderr, "<instance> element without an \"id\" attribute near line %u\n", n->line);
-				return CN_NMLIN_BADATTR;
-			}
-
-			size_t total_len = xmlStrlen( group_prefix) + xmlStrlen( id_s);
-			if ( total_len >= CN_MAX_LABEL_SIZE ) {
-				fprintf( stderr, "Combined label for an <instance> (\"%s%s\") exceeding %d characters near line %u\n",
-					 group_prefix, id_s, CN_MAX_LABEL_SIZE, n->line);
-				throw CN_NMLIN_BIGLABEL;
-			}
-			_longest_label = max( _longest_label,
-					      (unsigned short)snprintf( cell_id, CN_MAX_LABEL_SIZE-1, "%s.%s",
-									group_prefix, id_s));  // here, a new instance is given a name
-			xmlFree( id_s), id_s = nullptr;
-
-			if ( !(nin = n->children) )
-				continue;  // an <instance> without children; move on to the next
-
-			for ( ; nin; nin = nin->next ) {
-				if ( !(nin->type == XML_ELEMENT_NODE &&
-				       xmlStrEqual( nin->name, BAD_CAST "location")) )
-					continue;
-
-				xmlChar *x_s = xmlGetProp( nin, BAD_CAST "x"),
-					*y_s = xmlGetProp( nin, BAD_CAST "y"),
-					*z_s = xmlGetProp( nin, BAD_CAST "z");
-			      // here we do actually insert neurons into the model
-				if ( !(x_s && y_s && z_s) ) {
-					if ( verbosely > 1 )
-						fprintf( stderr, "<location> element missing full set of coordinates near line %u\n", nin->line);
-					// not an error: missing coordinates default to 0
-				}
-				x = x_s ? strtod( (char*)x_s, nullptr) : 0.;
-				y = y_s ? strtod( (char*)y_s, nullptr) : 0.;
-				z = z_s ? strtod( (char*)z_s, nullptr) : 0.;
-				xmlFree( x_s), xmlFree( y_s), xmlFree( z_s);
-
-				C_BaseNeuron *neu = add_neuron_species( (char*)type_s, cell_id, false);
-
-				if ( !neu || neu->_status & CN_UERROR ) {
-					if ( neu )
-						delete neu;
-					fprintf( stderr, "Failed to add a neuron \"%s\" near line %u\n", cell_id, n->line);
-					return CN_NMLIN_STRUCTERROR;
-				} else {
-					neu->_serial_id = _global_unit_id_reservoir++;
-					neu->pos = {x, y, z};
-					retval++;
-				}
-			}
-		}
-	} catch (int ex) {
-		xmlFree( id_s);
-		return ex;
-	}
-
-	return retval;
-}
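-
-/* A matching population fragment, as a sketch (names follow the parsing
- * code above; values are hypothetical). With group_prefix "LN", the
- * instance below becomes a neuron labelled "LN.0" at (0, 1, 0):
- *
- *   <instance id="0">
- *     <location x="0.0" y="1.0" z="0.0"/>
- *   </instance>
- */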
-
-
-
-
-int
-cnrun::CModel::
-_process_projection_connections( xmlNode *n,
-				 const xmlChar *synapse_name, const xmlChar *type_s,
-				 const xmlChar *src_grp_prefix, const xmlChar *tgt_grp_prefix)
-{
-	// similar to _process_population_instances, except that we read some more attributes (source and
-	// target units)
-
-	int	retval = 0;  // is also a counter of synapses
-
-	char	//synapse_id [CN_MAX_LABEL_SIZE],
-		src_s[CN_MAX_LABEL_SIZE],
-		tgt_s[CN_MAX_LABEL_SIZE];
-	double	weight;
-
-	C_BaseSynapse	*y;
-
-	xmlChar *src_cell_id_s = nullptr,
-		*tgt_cell_id_s = nullptr,
-		*weight_s      = nullptr;
-	try {
-		for ( ; n; n = n->next ) {
-			if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "connection") )
-				continue;
-
-			src_cell_id_s = xmlGetProp( n, BAD_CAST "pre_cell_id"),
-			tgt_cell_id_s = xmlGetProp( n, BAD_CAST "post_cell_id"),
-			weight_s      = xmlGetProp( n, BAD_CAST "weight");
-			if ( /*!synapse_id_s || */ !src_cell_id_s || !tgt_cell_id_s ) {
-				fprintf( stderr, "A <connection> element without \"pre_cell_id\" and/or \"post_cell_id\" attribute near line %u\n", n->line);
-				throw CN_NMLIN_BADATTR;
-			}
-
-			snprintf( src_s, CN_MAX_LABEL_SIZE-1, "%s.%s", src_grp_prefix, src_cell_id_s);
-			snprintf( tgt_s, CN_MAX_LABEL_SIZE-1, "%s.%s", tgt_grp_prefix, tgt_cell_id_s);
-
-			if ( !weight_s ) {
-				if ( verbosely > 1 )
-					fprintf( stderr, "Assuming 0 for a synapse of \"%s.%s\" to \"%s.%s\" without a \"weight\" attribute near line %u\n",
-						 src_grp_prefix, src_cell_id_s, tgt_grp_prefix, tgt_cell_id_s, n->line);
-				weight = 0.;
-			} else
-				weight = strtod( (char*)weight_s, nullptr);
-			/* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
-				xmlFree( weight_s);
-			src_cell_id_s = tgt_cell_id_s = weight_s = nullptr;  // else the catch below could double-free
-
-			y = add_synapse_species( (char*)type_s, src_s, tgt_s, weight, true, false);
-
-			if ( !y || y->_status & CN_UERROR ) {
-				if ( y )
-					delete y;
-				fprintf( stderr, "Failed to add an \"%s\" synapse from \"%s\" to \"%s\" near line %u\n",
-					 (char*)type_s, src_s, tgt_s, n->line);
-				return CN_NMLIN_STRUCTERROR;
-			} else
-				retval++;
-		}
-	} catch (int ex) {
-		/* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
-			xmlFree( weight_s);
-		return ex;
-	}
-
-	return retval;
-}
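-
-/* Label composition, worked through as a sketch: with src_grp_prefix
- * "LN" and pre_cell_id "0", the source endpoint resolves to the neuron
- * labelled "LN.0", and likewise "PN.1" on the target side, matching the
- * names given to instances in _process_population_instances() above.
- */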
-
-
-
-int
-cnrun::CModel::
-export_NetworkML( const char *fname)
-{
-	int retval = 0;
-
-	LIBXML_TEST_VERSION;
-
-	fprintf( stderr, "export_NetworkML() not implemented yet\n");
-
-	return retval;
-}
-
-
-
-
-
-
-
-#else
-#error Need an XMLREADER-enabled libxml2 (>2.6)
-
-#endif // LIBXML_READER_ENABLED
-
-// eof
diff --git a/upstream/src/libcn/model-struct.cc b/upstream/src/libcn/model-struct.cc
deleted file mode 100644
index d571672..0000000
--- a/upstream/src/libcn/model-struct.cc
+++ /dev/null
@@ -1,1416 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-02
- *
- * CModel household
- */
-
-#include <sys/time.h>
-#include <iostream>
-#include <set>
-#include <algorithm>
-
-#include <regex.h>
-
-#include "libstilton/string.hh"
-
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-
-
-cnrun::CModel::
-CModel( const char *inname, CIntegrate_base *inintegrator, int instatus)
-      : name (inname),
-	_status (instatus | CN_MDL_NOTREADY),
-	_global_unit_id_reservoir (0l),
-	_longest_label (1),
-	_var_cnt (1),			// reserve [0] for model_time
-	_cycle (0),
-	_discrete_time (0.),  _discrete_dt (NAN),
-	spike_threshold (0.),
-	spike_lapse (5.),
-	listen_dt (0),
-	_dt_logger (nullptr),
-		_spike_logger (nullptr),	// these streams are opened at first write, in prepare_advance()
-	verbosely (1)
-{
-	V.resize( _var_cnt), W.resize( _var_cnt);
-	V[0] = 0.;
-
-	(_integrator = inintegrator) -> model = this;
-
-	{
-		const gsl_rng_type * T;
-		gsl_rng_env_setup();
-		T = gsl_rng_default;
-		if ( gsl_rng_default_seed == 0 ) {
-			struct timeval tp = { 0L, 0L };
-			gettimeofday( &tp, nullptr);
-			gsl_rng_default_seed = tp.tv_usec;
-		}
-		_rng = gsl_rng_alloc( T);
-	}
-
-	signal( SIGINT, SIG_IGN);
-}
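-
-/* Construction usage, as a sketch; the CIntegrateRK65 constructor
- * arguments are an assumption here, not taken from integrate-rk65.hh:
- *
- *   CModel model( "example", new CIntegrateRK65(), 0);
- *   // the ctor wires the integrator back via ->model, and the dtor
- *   // deletes it if its is_owned flag is set
- */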
-
-
-cnrun::CModel::
-~CModel()
-{
-	if ( verbosely > 4 )
-		fprintf( stdout, "Deleting all units...\n");
-
-	while (unit_list.size())
-		if ( unit_list.back() -> is_owned() )
-			delete unit_list.back();
-		else
-			unit_list.pop_back();
-
-	if ( _integrator->is_owned )
-		delete _integrator;
-
-	delete _dt_logger;
-	delete _spike_logger;
-
-	while ( Sources.size() ) {
-		delete Sources.back();
-		Sources.pop_back();
-	}
-
-	gsl_rng_free( _rng);
-}
-
-
-void
-cnrun::CModel::
-reset( bool also_reset_params)
-{
-	_cycle = 0, V[0] = 0.;
-
-	_integrator->dt = _integrator->_dt_min;
-
-	reset_state_all_units();
-	if ( also_reset_params )
-		for_all_units (U)
-			(*U)->reset_params();
-
-	regular_periods.clear();
-	regular_periods_last_checked.clear();
-  // this will cause scheduler_update_periods_* to be recomputed by prepare_advance()
-	_status |= CN_MDL_NOTREADY;
-
-	if ( _status & CN_MDL_LOGDT ) {
-		delete _dt_logger;
-		string	fname = name + ".dtlog";
-		_dt_logger = new ofstream( fname.data());
-	}
-	if ( _status & CN_MDL_LOGSPIKERS ) {
-		delete _spike_logger;
-		string	fname = name + ".spikes";
-		_spike_logger = new ofstream( fname.data());
-	}
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseUnit*
-cnrun::CModel::
-unit_by_label( const char *inlabel) const
-{
-	for_all_units_const (U)
-		if ( strcmp( (*U)->_label, inlabel) == 0 )
-			return *U;
-	return nullptr;
-}
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-neuron_by_label( const char *inlabel) const
-{
-	for_all_units_const (U)
-		if ( (*U)->is_neuron() && strcmp( (*U)->_label, inlabel) == 0 )
-			return static_cast<C_BaseNeuron*>(*U);
-	return nullptr;
-}
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-synapse_by_label( const char *inlabel) const
-{
-	for_all_units_const (U)
-		if ( (*U)->is_synapse() && strcmp( (*U)->_label, inlabel) == 0 )
-			return static_cast<C_BaseSynapse*>(*U);
-	return nullptr;
-}
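-
-/* Lookup usage, as a sketch; the labels follow the "<group>.<id>"
- * convention composed in _process_population_instances() and are
- * hypothetical:
- *
- *   C_BaseNeuron *src = model.neuron_by_label( "LN.0");
- *   C_BaseNeuron *tgt = model.neuron_by_label( "PN.1");
- *   if ( !src || !tgt )
- *           fprintf( stderr, "no such neuron(s)\n");
- */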
-
-
-
-
-
-
-
-// ----- registering units with core lists
-void
-cnrun::CModel::
-_include_base_unit( C_BaseUnit* u)
-{
-	for_all_units (U)
-		if ( (*U) == u ) {
-			fprintf( stderr, "Unit %s found already included in model %s\n", u->_label, name.c_str());
-			goto skip_ul_pushback;
-		}
-	unit_list.push_back( u);
-skip_ul_pushback:
-
-	if ( verbosely > 5 )
-		fprintf( stdout, "  registered base unit %s\n", u->_label);
-
-	if ( u->has_sources() )
-		register_unit_with_sources( u);
-
-	if ( u->is_listening() ) {
-		for_all_listening_units (U)
-			if ( (*U) == u ) {
-				fprintf( stderr, "Unit \"%s\" already on listening list\n", u->_label);
-				goto skip_lisn_reg;
-			}
-		lisn_unit_list.push_back( u);
-	}
-skip_lisn_reg:
-
-	u->M = this;
-
-	u->_serial_id = _global_unit_id_reservoir++;
-}
-
-
-
-
-int
-cnrun::CModel::
-include_unit( C_HostedNeuron *u, bool is_last)
-{
-	_include_base_unit( u);
-
-	u->idx = _var_cnt;
-	_var_cnt += u->v_no();
-
-	hosted_neu_list.push_back( u);
-
-	// if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
-	// 	spikelogging_neu_list.push_back( u);
-
-	if ( u->is_conscious() )
-		conscious_neu_list.push_back( u);
-
-	if ( is_last )
-		finalize_additions();
-
-	return 0;
-}
-
-int
-cnrun::CModel::
-include_unit( C_HostedSynapse *u, bool is_last)
-{
-	_include_base_unit( u);
-
-	u->idx = _var_cnt;
-	_var_cnt += u->v_no();
-
-	hosted_syn_list.push_back( u);
-
-	if ( u->traits() & UT_MULTIPLEXING )
-		mx_syn_list.push_back( u);
-
-	if ( is_last )
-		finalize_additions();
-
-	return 0;
-}
-
-
-
-int
-cnrun::CModel::
-include_unit( C_StandaloneNeuron *u)
-{
-	_include_base_unit( u);
-
-	// if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
-	// 	spikelogging_neu_list.push_back( u);
-
-	if ( u->is_conscious() )
-		conscious_neu_list.push_back( u);
-
-	if ( u->is_ddtbound() )
-		ddtbound_neu_list.push_back( u);
-	else
-		standalone_neu_list.push_back( u);
-
-	return 0;
-}
-
-
-int
-cnrun::CModel::
-include_unit( C_StandaloneSynapse *u)
-{
-/*
-	if ( _check_new_synapse( u) ) {
-//		u->enable( false);
-		u->M = nullptr;
-		return -1;
-	}
-*/
-	_include_base_unit( u);
-
-	if ( u->is_ddtbound() )
-		ddtbound_syn_list.push_back( u);
-	else
-		standalone_syn_list.push_back( u);
-
-	if ( u->traits() & UT_MULTIPLEXING )
-		mx_syn_list.push_back( u);
-
-	return 0;
-}
-
-
-
-// preserve the unit if !do_delete, so it can be re-included again
-cnrun::C_BaseUnit*
-cnrun::CModel::
-exclude_unit( C_BaseUnit *u, bool do_delete)
-{
-	if ( verbosely > 5 )
-		fprintf( stderr, "-excluding unit \"%s\"", u->_label);
-
-	if ( u->has_sources() )
-		unregister_unit_with_sources( u);
-
-	if ( u->is_listening() )
-		u->stop_listening();  // also calls unregister_listener
-
-	if ( u->is_synapse() && u->traits() & UT_MULTIPLEXING )
-		mx_syn_list.erase( find( mx_syn_list.begin(), mx_syn_list.end(), u));
-
-	if ( u->is_conscious() )
-		conscious_neu_list.erase( find(conscious_neu_list.begin(), conscious_neu_list.end(), u));
-
-	if ( u->is_hostable() ) {
-		size_t	our_idx;
-		if ( u->is_neuron() ) {
-			hosted_neu_list.erase( find( hosted_neu_list.begin(), hosted_neu_list.end(), u));
-			our_idx = ((C_HostedNeuron*)u) -> idx;
-		} else {
-			hosted_syn_list.erase( find( hosted_syn_list.begin(), hosted_syn_list.end(), u));
-			our_idx = ((C_HostedSynapse*)u) -> idx;
-		}
-
-	      // shrink V
-		if ( verbosely > 5 )
-			fprintf( stderr, " (shrink V by %zu)", (size_t)u->v_no());
-		for_all_hosted_neurons (N)
-			if ( (*N)->idx > our_idx )
-				(*N)->idx -= u->v_no();
-		for_all_hosted_synapses (Y)
-			if ( (*Y)->idx > our_idx )
-				(*Y)->idx -= u->v_no();
-		memmove( &V[our_idx], &V[our_idx+u->v_no()], (_var_cnt - our_idx - u->v_no()) * sizeof(double));
-		V.resize( _var_cnt -= u->v_no());
-	}
-	if ( u->is_ddtbound() ) {
-		if ( u->is_neuron() )
-			ddtbound_neu_list.erase( find( ddtbound_neu_list.begin(), ddtbound_neu_list.end(), u));
-		else
-			ddtbound_syn_list.erase( find( ddtbound_syn_list.begin(), ddtbound_syn_list.end(), u));
-	}
-	if ( !u->is_hostable() ) {
-		if ( u->is_neuron() )
-			standalone_neu_list.erase( find( standalone_neu_list.begin(), standalone_neu_list.end(), u));
-		else
-			standalone_syn_list.erase( find( standalone_syn_list.begin(), standalone_syn_list.end(), u));
-	}
-
-	unit_list.erase( find( unit_list.begin(), unit_list.end(), u));
-
-	if ( do_delete ) {
-		delete u;
-		u = nullptr;
-	} else
-		u->M = nullptr;
-
-	if ( verbosely > 5 )
-		fprintf( stderr, ".\n");
-	return u;
-}
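-
-/* Exclusion usage, as a sketch: with do_delete == false the unit object
- * survives and can be re-included later (the cast assumes the unit is a
- * standalone neuron; the label is hypothetical):
- *
- *   C_BaseUnit *u = model.unit_by_label( "LN.0");
- *   if ( u && !u->is_hostable() ) {
- *           model.exclude_unit( u, false);
- *           // ... and, once wanted again:
- *           model.include_unit( static_cast<C_StandaloneNeuron*>(u));
- *   }
- */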
-
-
-
-
-
-
-
-// listeners & spikeloggers
-
-void
-cnrun::CModel::
-register_listener( C_BaseUnit *u)
-{
-	if ( find( lisn_unit_list.begin(), lisn_unit_list.end(), u) == lisn_unit_list.end() )
-		lisn_unit_list.push_back( u);
-}
-
-void
-cnrun::CModel::
-unregister_listener( C_BaseUnit *u)
-{
-	const auto& U = find( lisn_unit_list.begin(), lisn_unit_list.end(), u);
-	if ( U != lisn_unit_list.end() )
-		lisn_unit_list.erase( U);
-}
-
-
-
-
-
-
-
-void
-cnrun::CModel::
-register_spikelogger( C_BaseNeuron *n)
-{
-	spikelogging_neu_list.push_back( n);
-	spikelogging_neu_list.sort();
-	spikelogging_neu_list.unique();
-}
-
-void
-cnrun::CModel::
-unregister_spikelogger( C_BaseNeuron *n)
-{
-	for_all_spikelogging_neurons (N)
-		if ( (*N) == n ) {
-			spikelogging_neu_list.erase( N);
-			return;
-		}
-}
-
-
-
-
-
-
-
-
-
-
-// units with sources
-
-void
-cnrun::CModel::
-register_unit_with_sources( C_BaseUnit *u)
-{
-	for ( auto& I : u->sources )
-		if ( I.source->is_periodic() )
-			units_with_periodic_sources.push_back( u);
-		else
-			units_with_continuous_sources.push_back( u);
-	units_with_continuous_sources.unique();
-	units_with_periodic_sources.unique();
-}
-
-void
-cnrun::CModel::
-unregister_unit_with_sources( C_BaseUnit *u)
-{
-start_over_1:
-	for_all_units_with_continuous_sources (U)
-		if ( (*U) == u ) {
-			units_with_continuous_sources.erase( U);
-			if ( verbosely > 5 )
-				fprintf( stderr, " (removed \"%s\" instance from units w/ continuous sources list)\n", u->_label);
-			goto start_over_1;
-		}
-start_over_2:
-	for_all_units_with_periodic_sources (U)
-		if ( (*U) == u ) {
-			units_with_periodic_sources.erase( U);
-			if ( verbosely > 5 )
-				fprintf( stderr, " (removed \"%s\" instance from units w/ periodic sources list)\n", u->_label);
-			goto start_over_2;
-		}
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-add_neuron_species( const char *type_s, const char *label, bool finalize,
-		    double x, double y, double z)
-{
-	TUnitType t = unit_species_by_string( type_s);
-	if ( t == NT_VOID || !unit_species_is_neuron(type_s) ) {
-		fprintf( stderr, "Unrecognised neuron species: \"%s\"\n", type_s);
-		return nullptr;
-	} else
-		return add_neuron_species( t, label, finalize, x, y, z);
-}
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-add_neuron_species( TUnitType type, const char *label, bool finalize,
-		    double x, double y, double z)
-{
-	C_BaseNeuron *n;
-	switch ( type ) {
-	case NT_HH_D:
-		n = new CNeuronHH_d( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-	case NT_HH_R:
-		n = new CNeuronHH_r( label, x, y, z, this, CN_UOWNED);
-	    break;
-
-	case NT_HH2_D:
-		n = new CNeuronHH2_d( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-	// case NT_HH2_R:
-	// 	n = new CNeuronHH2_r( label, x, y, z, this, CN_UOWNED, finalize);
-	//     break;
-//#ifdef CN_WANT_MORE_NEURONS
-	case NT_EC_D:
-		n = new CNeuronEC_d( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-	case NT_ECA_D:
-		n = new CNeuronECA_d( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-/*
-	case NT_LV:
-		n = new COscillatorLV( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
- */
-	case NT_COLPITTS:
-		n = new COscillatorColpitts( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-	case NT_VDPOL:
-		n = new COscillatorVdPol( label, x, y, z, this, CN_UOWNED, finalize);
-	    break;
-//#endif
-	case NT_DOTPOISSON:
-		n = new COscillatorDotPoisson( label, x, y, z, this, CN_UOWNED);
-	    break;
-	case NT_POISSON:
-		n = new COscillatorPoisson( label, x, y, z, this, CN_UOWNED);
-	    break;
-
-	case NT_DOTPULSE:
-		n = new CNeuronDotPulse( label, x, y, z, this, CN_UOWNED);
-	    break;
-
-	case NT_MAP:
-		n = new CNeuronMap( label, x, y, z, this, CN_UOWNED);
-	    break;
-
-	default:
-		return nullptr;
-	}
-	if ( n && n->_status & CN_UERROR ) {
-		delete n;
-		return nullptr;
-	}
-	return n;
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-add_synapse_species( const char *type_s, const char *src_l, const char *tgt_l,
-		     double g, bool allow_clone, bool finalize)
-{
-	TUnitType ytype = unit_species_by_string( type_s);
-	bool	given_species = true;
-	if ( ytype == NT_VOID && (given_species = false, ytype = unit_family_by_string( type_s)) == NT_VOID ) {
-		fprintf( stderr, "Unrecognised synapse species or family: \"%s\"\n", type_s);
-		return nullptr;
-	}
-
-	C_BaseNeuron
-		*src = neuron_by_label( src_l),
-		*tgt = neuron_by_label( tgt_l);
-	if ( !src || !tgt ) {
-		fprintf( stderr, "Phoney source (\"%s\") or target (\"%s\")\n", src_l, tgt_l);
-		return nullptr;
-	}
-
-	if ( given_species )  // let lower function do the checking
-		return add_synapse_species( ytype, src, tgt, g, allow_clone, finalize);
-
-	switch ( ytype ) {
-      // catch by first entry in __CNUDT, assign proper species per source and target traits
-	case YT_AB_DD:
-		if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-			ytype = YT_AB_RR;
-		else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-			ytype = YT_AB_RD;
-		else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXAB_DR;
-			else
-				ytype = YT_AB_DR;
-		else
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXAB_DD;
-			else
-				ytype = YT_AB_DD;
-	    break;
-
-	case YT_ABMINUS_DD:
-		if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-			ytype = YT_ABMINUS_RR;
-		else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-			ytype = YT_ABMINUS_RD;
-		else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXABMINUS_DR;
-			else
-				ytype = YT_ABMINUS_DR;
-		else
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXABMINUS_DD;
-			else
-				ytype = YT_ABMINUS_DD;
-	    break;
-
-	case YT_RALL_DD:
-		if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-			ytype = YT_RALL_RR;
-		else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-			ytype = YT_RALL_RD;
-		else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXRALL_DR;
-			else
-				ytype = YT_RALL_DR;
-		else
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXRALL_DD;
-			else
-				ytype = YT_RALL_DD;
-	    break;
-
-	case YT_MAP:
-		if ( src->traits() & UT_DDTSET)
-			if ( src->traits() & UT_DOT )
-				ytype = YT_MXMAP;
-			else
-				ytype = YT_MAP;
-		else {
-			fprintf( stderr, "Map synapses can only connect Map neurons\n");
-			return nullptr;
-		}
-	    break;
-	default:
-		printf( "Teleporting is fun!\n");
-		return nullptr;
-	}
-
-	return add_synapse_species( ytype, src, tgt, g, allow_clone, finalize);
-}
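-
-/* A worked example of the family dispatch above, assuming the family
- * string "AB" resolves to YT_AB_DD: with a dot-spiking (UT_DOT),
- * non-rate-based source and a rate-based (UT_RATEBASED) target, a call
- * like (labels hypothetical)
- *
- *   model.add_synapse_species( "AB", "DOT.0", "RB.0", 0.05);
- *
- * resolves to YT_MXAB_DR and ends up constructing a CSynapseMxAB_dr.
- */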
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-add_synapse_species( TUnitType ytype, C_BaseNeuron *src, C_BaseNeuron *tgt,
-		     double g, bool allow_clone, bool finalize)
-{
-	if ( verbosely > 5 )
-		printf( "add_synapse_species( \"%s\", \"%s\", \"%s\", %g, %d, %d)\n",
-			__CNUDT[ytype].species, src->_label, tgt->_label, g, allow_clone, finalize);
-
-	C_BaseSynapse *y = nullptr;
-
-      // consider cloning
-	if ( !(_status & CN_MDL_DONT_COALESCE) && allow_clone && src->_axonal_harbour.size() )
-		for ( auto& L : src->_axonal_harbour )
-			if ( L->_type == ytype &&
-			     L->is_not_altered() )
-				return L->clone_to_target( tgt, g);
-
-	switch ( ytype ) {
-      // the __CNUDT entry at first TUnitType element whose
-      // 'name' matches the type id supplied, captures all cases for a given synapse family
-	case YT_AB_RR:
-		if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			y = new CSynapseAB_rr( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-	case YT_AB_RD:
-		if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			// y = new CSynapseAB_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "AB_rd not implemented\n");
-	    break;
-	case YT_AB_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			// y = new CSynapseAB_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "AB_dr not implemented\n");
-	    break;
-	case YT_AB_DD:
-		if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			y = new CSynapseAB_dd( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-	case YT_MXAB_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-			y = new CSynapseMxAB_dr( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-	case YT_MXAB_DD:
-		if (  !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-			y = new CSynapseMxAB_dd( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-
-
-	case YT_ABMINUS_RR:
-		if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			// y = new CSynapseABMINUS_rr( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "ABMINUS_rr not implemented\n");
-	    break;
-	case YT_ABMINUS_RD:
-		if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			// y = new CSynapseABMINUS_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "ABMINUS_rd not implemented\n");
-	    break;
-	case YT_ABMINUS_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			// y = new CSynapseABMINUS_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "ABMINUS_dr not implemented\n");
-	    break;
-	case YT_ABMINUS_DD:
-		if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			y = new CSynapseABMinus_dd( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-	case YT_MXABMINUS_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-			// y = new CSynapseMxABMinus_dr( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "MxABMinus_dr not implemented\n");
-	    break;
-	case YT_MXABMINUS_DD:
-		if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-			// y = new CSynapseMxABMinus_dd( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "MxABMinus_dd not implemented\n");
-	    break;
-
-
-	case YT_RALL_RR:
-		if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			// y = new CSynapseRall_rr( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "Rall_rr not implemented\n");
-	    break;
-	case YT_RALL_RD:
-		if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			// y = new CSynapseRall_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "Rall_rd not implemented\n");
-	    break;
-	case YT_RALL_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-			// y = new CSynapseRall_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-			fprintf( stderr, "Rall_dr not implemented\n");
-	    break;
-	case YT_RALL_DD:
-		if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-			y = new CSynapseRall_dd( src, tgt, g, this, CN_UOWNED, finalize);
-	    break;
-	case YT_MXRALL_DR:
-		if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-			// y = new CSynapseMxRall_dr( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "MxRall_dr not implemented\n");
-	    break;
-	case YT_MXRALL_DD:
-		if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-			// y = new CSynapseMxRall_dd( src, tgt, g, this, CN_UOWNED, finalize);
-			fprintf( stderr, "MxRall_dd not implemented\n");
-	    break;
-
-
-	case YT_MAP:
-		if ( src->traits() & UT_DDTSET)
-			if ( src->traits() & UT_DOT )
-				y = new CSynapseMxMap( src, tgt, g, this, CN_UOWNED);
-			else
-				y = new CSynapseMap( src, tgt, g, this, CN_UOWNED);
-		else
-			fprintf( stderr, "Map synapses can only connect Map neurons\n");
-	    break;
-
-	default:
-		return nullptr;
-	}
-
-	if ( !y || y->_status & CN_UERROR ) {
-		if ( y )
-			delete y;
-		return nullptr;
-	}
-
-	if ( verbosely > 5 )
-		printf( "new synapse \"%s->%s\"\n", y->_label, tgt->label());
-	y->set_g_on_target( *tgt, g);
-
-	return y;
-}
-
-
-
-
-
-
-void
-cnrun::CModel::
-finalize_additions()
-{
-	V.resize( _var_cnt),  W.resize( _var_cnt);
-
-	for_all_hosted_neurons (N)
-		(*N) -> reset_vars();
-	for_all_hosted_synapses (Y)
-		(*Y) -> reset_vars();
-
-	if ( _status & CN_MDL_SORTUNITS ) {
-		__C_BaseUnitCompareByLabel cmp;
-		unit_list.sort( cmp);
-		// hosted_neu_list.sort( cmp);
-		// hosted_syn_list.sort( cmp);
-		// standalone_neu_list.sort( cmp);
-		// standalone_syn_list.sort( cmp);
-	}
-
-	_integrator->prepare();
-}
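-
-/* Deferred-allocation usage, as a sketch: when adding hosted units in
- * bulk, pass is_last == false and finalize once at the end, so V and W
- * are resized a single time (new_neurons is hypothetical):
- *
- *   for ( C_HostedNeuron *n : new_neurons )
- *           model.include_unit( n, false);
- *   model.finalize_additions();
- */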
-
-
-
-
-
-
-
-
-void
-cnrun::CModel::
-cull_deaf_synapses()
-{
-	// needs fixing:
-      // 1. need to traverse the synapse lists backwards, given the shifts they undergo on element deletions;
-      // 2. omit those with a param reader, scheduler or range, but only if it is connected to parameter "gsyn"
-grand_restart:
-	for_all_hosted_synapses (Y)
-		if ( !(*Y)->has_sources() ) {
-		restart:
-			for ( C_BaseSynapse::lni T = (*Y)->_targets.begin(); T != (*Y)->_targets.end(); T++ ) {
-				if ( (*Y)->g_on_target( **T) == 0  ) {
-					if ( verbosely > 3 )
-						fprintf( stderr, " (deleting dendrite to \"%s\" of a synapse \"%s\" with gsyn == 0)\n",
-							 (*T)->_label, (*Y)->_label);
-					(*T)->_dendrites.erase( *Y);
-					(*Y)->_targets.erase( find( (*Y)->_targets.begin(), (*Y)->_targets.end(), *T));
-
-					snprintf( (*Y)->_label, CN_MAX_LABEL_SIZE-1, "%s:%zu", (*Y)->_source->_label, (*Y)->_targets.size());
-					goto restart;
-				}
-			}
-			if ( (*Y)->_targets.size() == 0 ) {
-				delete (*Y);
-				goto grand_restart;
-			}
-		}
-
-	// older stuff
-/*
-	for_all_synapses_reversed (Y) {
-		int gsyn_pidx = (*Y) -> param_idx_by_sym( "gsyn");
-		if ( ((*Y)->param_schedulers && device_list_concerns_parm( (*Y)->param_schedulers, gsyn_pidx)) ||
-		     ((*Y)->param_readers    && device_list_concerns_parm( (*Y)->param_readers,    gsyn_pidx)) ||
-		     ((*Y)->param_ranges     && device_list_concerns_parm( (*Y)->param_ranges,     gsyn_pidx)) ) {
-			if ( verbosely > 2 )
-				printf( " (preserving doped synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
-			continue;
-		}
-		if ( gsyn_pidx > -1 && (*Y)->param_value( gsyn_pidx) == 0. ) {
-			if ( verbosely > 2 )
-				printf( " (deleting synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
-			delete (*Y);
-			cnt++;
-		}
-	}
-	if ( verbosely > 0 && cnt )
-		printf( "Deleted %zd deaf synapses\n", cnt);
-*/
-}
-
-
-
-// needs to be called after a neuron is put out
-void
-cnrun::CModel::
-cull_blind_synapses()
-{
-	for_all_hosted_synapses_reversed (Y)
-		if ( (*Y)->_source == nullptr && !(*Y)->has_sources() ) {
-			if ( verbosely > 3 )
-				printf( " (deleting synapse with nullptr source: \"%s\")\n", (*Y)->_label);
-			delete (*Y);
-		}
-	for_all_standalone_synapses_reversed (Y)
-		if ( (*Y)->_source == nullptr && !(*Y)->has_sources() ) {
-			if ( verbosely > 3 )
-				printf( " (deleting synapse with nullptr source: \"%s\")\n", (*Y)->_label);
-			delete (*Y);
-		}
-}
-
-
-
-void
-cnrun::CModel::
-reset_state_all_units()
-{
-	for_all_units (U)
-		(*U) -> reset_state();
-}
-
-
-
-
-
-
-
-// tags
-
-int
-cnrun::CModel::
-process_listener_tags( const list<STagGroupListener> &Listeners)
-{
-	regex_t RE;
-	for ( auto& P : Listeners ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_listener_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-		for_all_units (U) {
-			if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 ) {
-				if ( P.enable ) {
-					(*U) -> start_listening( P.bits);
-					if ( verbosely > 3 )
-						printf( " (unit \"%s\" listening%s)\n",
-							(*U)->_label, P.bits & CN_ULISTENING_1VARONLY ? ", to one var only" :"");
-				} else {
-					(*U) -> stop_listening();
-					if ( verbosely > 3 )
-						printf( " (unit \"%s\" not listening)\n", (*U)->_label);
-				}
-			}
-		}
-		regfree( &RE);
-	}
-
-	return 0;
-}
-
-
-int
-cnrun::CModel::
-process_spikelogger_tags( const list<STagGroupSpikelogger> &Spikeloggers)
-{
-	regex_t RE;
-	for ( auto& P : Spikeloggers ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_spikelogger_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-		for_all_standalone_neurons (N) {
-			if ( regexec( &RE, (*N)->_label, 0, 0, 0) == 0 ) {
-				if ( P.enable ) {
-					bool log_sdf = !(P.period == 0. || P.sigma == 0.);
-					if ( ( log_sdf && !(*N)->enable_spikelogging_service( P.period, P.sigma, P.from))
-					     ||
-					     (!log_sdf && !(*N)->enable_spikelogging_service()) ) {
-						fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
-							 (*N)->_label, (*N)->species());
-						return -1;
-					}
-				} else
-					(*N)->disable_spikelogging_service();
-
-				if ( verbosely > 3 )
-					printf( " (%sabling spike logging for standalone neuron \"%s\")\n",
-						P.enable ? "en" : "dis", (*N)->_label);
-			}
-		}
-		for_all_hosted_neurons (N) {
-			if ( regexec( &RE, (*N)->_label, 0, 0, 0) == 0 ) {
-				if ( P.enable ) {
-					bool log_sdf = !(P.period == 0. || P.sigma == 0.);
-					if ( ( log_sdf && !(*N)->enable_spikelogging_service( P.period, P.sigma, P.from))
-					     ||
-					     (!log_sdf && !(*N)->enable_spikelogging_service()) ) {
-						fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
-							 (*N)->_label, (*N)->species());
-						return -1;
-					}
-				} else
-					(*N)->disable_spikelogging_service();
-
-				if ( verbosely > 3 )
-					printf( " (%sabling spike logging for hosted neuron \"%s\")\n",
-						P.enable ? "en" : "dis", (*N)->_label);
-			}
-		}
-		regfree( &RE);
-	}
-
-	return 0;
-}
-
-
-int
-cnrun::CModel::
-process_putout_tags( const list<STagGroup> &ToRemove)
-{
-      // execute some
-	regex_t RE;
-	for ( auto& P : ToRemove ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_putout_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-		for_all_units (U) {
-			if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 ) {
-				if ( verbosely > 2 )
-					printf( " (put out unit \"%s\")\n",
-						(*U)->_label);
-				delete (*U);
-				if ( units() > 0 )
-					U = ulist_begin();
-				else
-					break;
-			}
-		}
-		regfree( &RE);
-	}
-
-	cull_blind_synapses();
-
-	return 0;
-}
-
-
-int
-cnrun::CModel::
-process_decimate_tags( const list<STagGroupDecimate> &ToDecimate)
-{
-      // decimate others
-	regex_t RE;
-	for ( auto& P : ToDecimate ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_decimate_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-
-	      // collect group
-		vector<C_BaseUnit*> dcmgroup;
-		for_all_units (U)
-			if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 )
-				dcmgroup.push_back( *U);
-		random_shuffle( dcmgroup.begin(), dcmgroup.end());
-
-	      // execute
-		size_t	to_execute = rint( dcmgroup.size() * P.fraction), n = to_execute;
-		while ( n-- )
-			delete dcmgroup[n];
-
-		if ( verbosely > 3 )
-			printf( " (decimated %4.1f%% (%zu units) of %s)\n", P.fraction*100, to_execute, P.pattern.c_str());
-
-		regfree( &RE);
-	}
-
-	cull_blind_synapses();
-
-	return 0;
-}
-
-
-
-
-
-
-int
-cnrun::CModel::
-process_paramset_static_tags( const list<STagGroupNeuronParmSet> &tags)
-{
-	regex_t RE;
-	for ( auto& P : tags ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_paramset_static_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-
-		vector<string> current_tag_assigned_labels;
-
-		for_all_neurons (U) {
-			if ( regexec( &RE, (*U)->_label, 0, 0, 0) != 0 )  // skip units whose label does not match
-				continue;
-		      // because a named parameter can map to a different param_id in different units, rather
-		      // do lookup every time
-
-			int p_d = -1;
-			C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-			if ( (p_d = (*U)->param_idx_by_sym( P.parm.c_str())) > -1 )
-				kind = C_BaseUnit::SINK_PARAM;
-			else if ( (p_d = (*U)->var_idx_by_sym( P.parm.c_str())) > -1 )
-				kind = C_BaseUnit::SINK_VAR;
-			if ( p_d == -1 ) {
-				fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-					 (*U)->class_name(), (*U)->label(), (*U)->species(), P.parm.c_str());
-				continue;
-			}
-
-			switch ( kind ) {
-			case C_BaseUnit::SINK_PARAM:
-				(*U)->param_value(p_d) = P.enable ? P.value : __CNUDT[(*U)->type()].stock_param_values[p_d];
-				(*U)->param_changed_hook();
-			    break;
-			case C_BaseUnit::SINK_VAR:
-				(*U)->  var_value(p_d) = P.value;
-			    break;
-			}
-
-			current_tag_assigned_labels.push_back( (*U)->label());
-		}
-
-		if ( current_tag_assigned_labels.empty() ) {
-			fprintf( stderr, "No neuron labelled matching \"%s\"\n", P.pattern.c_str());
-			return -2;
-		}
-
-		if ( verbosely > 3 ) {
-			printf( " set ");
-			for ( auto S = current_tag_assigned_labels.begin(); S != current_tag_assigned_labels.end(); S++ )
-				printf( "%s%s",
-					(S == current_tag_assigned_labels.begin()) ? "" : ", ", S->c_str());
-			printf( " {%s} = %g\n", P.parm.c_str(), P.value);
-		}
-		regfree( &RE);
-	}
-	return 0;
-}
-
-
-
-
-
-int
-cnrun::CModel::
-process_paramset_static_tags( const list<STagGroupSynapseParmSet> &tags)
-{
-	for ( auto& P : tags ) {
-		regex_t REsrc, REtgt;
-		if (0 != regcomp( &REsrc, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB) ) {  // P->pattern acting as src
-			fprintf( stderr, "Invalid regexp in process_paramset_static_tags (src): \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-		if (0 != regcomp( &REtgt, P.target.c_str(), REG_EXTENDED | REG_NOSUB) ) {
-			fprintf( stderr, "Invalid regexp in process_paramset_static_tags (tgt): \"%s\"\n", P.target.c_str());
-			return -1;
-		}
-
-		vector<string> current_tag_assigned_labels;
-
-		bool do_gsyn = (P.parm == "gsyn");
-
-		if ( verbosely > 5 )
-			printf( "== setting %s -> %s {%s} = %g...\n", P.pattern.c_str(), P.target.c_str(), P.parm.c_str(), P.value);
-
-		for_all_neurons (Us) {
-			if ( regexec( &REsrc, (*Us)->label(), 0, 0, 0) != 0 )  // skip sources whose label does not match
-				continue;
-
-			for_all_neurons (Ut) {
-				if ( regexec( &REtgt, (*Ut)->label(), 0, 0, 0) != 0 ) /* || Us == Ut */
-					continue;
-				C_BaseSynapse *y = static_cast<C_BaseNeuron*>(*Us) -> connects_via( *static_cast<C_BaseNeuron*>(*Ut));
-				if ( !y )
-					continue;
-
-				if ( do_gsyn ) {
-					y->set_g_on_target( *static_cast<C_BaseNeuron*>(*Ut), P.value);
-					current_tag_assigned_labels.push_back( y->label());
-					continue;
-				}
-
-				int p_d = -1;
-				C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-				if ( (p_d = y->param_idx_by_sym( P.parm.c_str())) > -1 )
-					kind = C_BaseUnit::SINK_PARAM;
-				else if ( (p_d = y->var_idx_by_sym( P.parm.c_str())) > -1 )
-					kind = C_BaseUnit::SINK_VAR;
-				if ( p_d == -1 ) {
-					fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-						 y->class_name(), y->label(), y->species(), P.parm.c_str());
-					continue;
-				}
-
-				switch ( kind ) {
-				case C_BaseUnit::SINK_PARAM:
-					if ( y->_targets.size() > 1 ) {
-						y = y->make_clone_independent( static_cast<C_BaseNeuron*>(*Ut));  // lest brethren synapses to other targets be clobbered
-					}
-					y->param_value(p_d) = P.enable ? P.value : __CNUDT[y->type()].stock_param_values[p_d];
-					y->param_changed_hook();
-				    break;
-				case C_BaseUnit::SINK_VAR:
-					y->  var_value(p_d) = P.value;
-				    break;
-				}
-
-				current_tag_assigned_labels.push_back( y->label());
-			}
-		}
-		if ( current_tag_assigned_labels.empty() ) {
-			fprintf( stderr, "No synapse connecting any of \"%s\" to \"%s\"\n", P.pattern.c_str(), P.target.c_str());
-			return -2;
-		}
-
-		if ( verbosely > 3 ) {
-			printf( " set ");
-			for ( auto S = current_tag_assigned_labels.begin(); S != current_tag_assigned_labels.end(); S++ )
-				printf( "%s%s",
-					(S == current_tag_assigned_labels.begin()) ? "" : ", ", S->c_str());
-			printf( " {%s} = %g\n", P.parm.c_str(), P.value);
-		}
-		regfree( &REsrc), regfree( &REtgt);
-	}
-
-	if ( !(_status & CN_MDL_DONT_COALESCE) )
-		coalesce_synapses();
-
-	return 0;
-}
-
-
-void
-cnrun::CModel::
-coalesce_synapses()
-{
-startover:
-	for_all_synapses (U1) {
-		C_BaseSynapse *y1 = static_cast<C_BaseSynapse*>(*U1);
-		for_all_synapses (U2) {
-			if ( *U2 == *U1 )
-				continue;
-
-			C_BaseSynapse *y2 = static_cast<C_BaseSynapse*>(*U2);
-			if ( y1->_source == y2->_source &&
-			     (*U1) -> is_identical( **U2) ) {
-
-				if ( verbosely > 5 )
-					printf( "coalescing \"%s\" and \"%s\"\n", y1->_label, y2->_label);
-				for ( C_BaseSynapse::lni T = y2->_targets.begin(); T != y2->_targets.end(); T++ ) {
-					y1->_targets.push_back( *T);
-					(*T)->_dendrites[y1] = (*T)->_dendrites[y2];
-				}
-				snprintf( y1->_label, CN_MAX_LABEL_SIZE-1, "%s:%zu", y1->_source->_label, y1->_targets.size());
-
-				delete y2;
-
-				goto startover;
-			}
-		}
-	}
-}
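-
-/* Coalescing example, as a sketch: two synapses off the same source with
- * identical parameters, each labelled "LN.0:1" (one target apiece), fold
- * into a single synapse relabelled "LN.0:2" serving both targets, and
- * the redundant object is deleted.
- */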
-
-
-
-
-
-int
-cnrun::CModel::
-process_paramset_source_tags( const list<STagGroupSource> &tags)
-{
-	regex_t RE;
-	for ( auto& P : tags ) {
-		if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-			fprintf( stderr, "Invalid regexp in process_paramset_source_tags: \"%s\"\n", P.pattern.c_str());
-			return -1;
-		}
-
-		for_all_units (U) {
-			if ( regexec( &RE, (*U)->label(), 0, 0, 0) != 0 )  // skip units whose label does not match
-				continue;
-
-			int p_d = -1;
-			C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-			if ( (p_d = (*U)->param_idx_by_sym( P.parm.c_str())) > -1 )
-				kind = C_BaseUnit::SINK_PARAM;
-			else if ( (p_d = (*U)->var_idx_by_sym( P.parm.c_str())) > -1 )
-				kind = C_BaseUnit::SINK_VAR;
-			if ( p_d == -1 ) {
-				fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-					 (*U)->class_name(), (*U)->label(), (*U)->species(), P.parm.c_str());
-				continue;
-			}
-
-			if ( P.enable ) {
-				(*U) -> attach_source( P.source, kind, p_d);
-				if ( verbosely > 3 )
-					printf( "Connected source \"%s\" to \"%s\"{%s}\n",
-						P.source->name.c_str(), (*U)->label(), P.parm.c_str());
-			} else {
-				(*U) -> detach_source( P.source, kind, p_d);
-				if ( verbosely > 3 )
-					printf( "Disconnected source \"%s\" from \"%s\"{%s}\n",
-						P.source->name.c_str(), (*U)->label(), P.parm.c_str());
-			}
-		}
-		regfree( &RE);
-	}
-
-	return 0;
-}
-
-
-
-
-inline const char*
-__attribute__ ((pure))
-pl_ending( size_t cnt)
-{
-	return cnt == 1 ? "" : "s";
-}
-
-void
-cnrun::CModel::
-dump_metrics( FILE *strm)
-{
-	fprintf( strm,
-		 "\nModel \"%s\"%s:\n"
-		 "  %5zd unit%s total (%zd Neuron%s, %zd Synapse%s):\n"
-		 "    %5zd hosted,\n"
-		 "    %5zd standalone\n"
-		 "    %5zd discrete dt-bound\n"
-		 "  %5zd Listening unit%s\n"
-		 "  %5zd Spikelogging neuron%s\n"
-		 "  %5zd Unit%s being tuned continuously\n"
-		 "  %5zd Unit%s being tuned periodically\n"
-		 "  %5zd Spontaneously firing neuron%s\n"
-		 "  %5zd Multiplexing synapse%s\n"
-		 " %6zd vars on integration vector\n\n",
-		 name.c_str(), (_status & CN_MDL_DISKLESS) ? " (diskless)" : "",
-		 units(), pl_ending(units()),
-		 total_neuron_cnt(), pl_ending(total_neuron_cnt()),
-		 total_synapse_cnt(), pl_ending(total_synapse_cnt()),
-		 hosted_unit_cnt(),
-		 standalone_unit_cnt(),
-		 ddtbound_unit_cnt(),
-		 listening_unit_cnt(), pl_ending(listening_unit_cnt()),
-		 spikelogging_neuron_cnt(), pl_ending(spikelogging_neuron_cnt()),
-		 unit_with_continuous_sources_cnt(), pl_ending(unit_with_continuous_sources_cnt()),
-		 unit_with_periodic_sources_cnt(), pl_ending(unit_with_periodic_sources_cnt()),
-		 conscious_neuron_cnt(), pl_ending(conscious_neuron_cnt()),
-		 mx_syn_list.size(), pl_ending(mx_syn_list.size()),
-		 _var_cnt-1);
-	if ( _status & CN_MDL_HAS_DDTB_UNITS )
-		fprintf( strm, "Discrete dt: %g msec\n", discrete_dt());
-}
-
-void
-cnrun::CModel::
-dump_state( FILE *strm)
-{
-	fprintf( strm,
-		 "Model time: %g msec\n"
-		 "Integrator dt_min: %g msec, dt_max: %g msec\n"
-		 "Logging at: %g msec\n\n",
-		 model_time(),
-		 dt_min(), dt_max(),
-		 listen_dt);
-}
-
-
-
-void
-cnrun::CModel::
-dump_units( FILE *strm)
-{
-	fprintf( strm, "\nUnit types in the model:\n");
-
-	set<int> found_unit_types;
-	unsigned p = 0;
-
-	fprintf( strm, "\n===== Neurons:\n");
-	for_all_units (U)
-		if ( (*U)->is_neuron() && found_unit_types.count( (*U)->type()) == 0 ) {
-			found_unit_types.insert( (*U)->type());
-
-			fprintf( strm, "--- %s: %s\nParameters: ---\n",
-				 (*U)->species(), (*U)->type_description());
-			for ( p = 0; p < (*U)->p_no(); p++ )
-				if ( *(*U)->param_sym(p) != '.' || verbosely > 5 )
-					fprintf( strm, "%2d: %-5s\t= %s %s\n",
-						 p, (*U)->param_sym(p),
-						 cnrun::str::double_dot_aligned_s( (*U)->param_value(p), 4, 6).c_str(),
-						 (*U)->param_name(p));
-			fprintf( strm, "Variables: ---\n");
-			for ( p = 0; p < (*U)->v_no(); p++ )
-				if ( *(*U)->var_sym(p) != '.' || verbosely > 5 )
-					fprintf( strm, "%2d: %-5s\t= %s %s\n",
-						 p, (*U)->var_sym(p),
-						 cnrun::str::double_dot_aligned_s( (*U)->var_value(p), 4, 6).c_str(),
-						 (*U)->var_name(p));
-		}
-	fprintf( strm, "\n===== Synapses:\n");
-	for_all_units (U)
-		if ( (*U)->is_synapse() && found_unit_types.count( (*U)->type()) == 0 ) {
-			found_unit_types.insert( (*U)->type());
-
-			fprintf( strm, "--- %s: %s\nParameters: ---\n",
-				 (*U)->species(), (*U)->type_description());
-			fprintf( strm, "    parameters:\n");
-			for ( p = 0; p < (*U)->p_no(); p++ )
-				if ( *(*U)->param_sym(p) != '.' || verbosely > 5 )
-					fprintf( strm, "%2d: %-5s\t= %s %s\n",
-						 p, (*U)->param_sym(p),
-						 cnrun::str::double_dot_aligned_s( (*U)->param_value(p), 4, 6).c_str(),
-						 (*U)->param_name(p));
-			fprintf( strm, "Variables: ---\n");
-			for ( p = 0; p < (*U)->v_no(); p++ )
-				if ( *(*U)->var_sym(p) != '.' || verbosely > 5 )
-					fprintf( strm, "%2d: %-5s\t= %s %s\n",
-						 p, (*U)->var_sym(p),
-						 cnrun::str::double_dot_aligned_s( (*U)->var_value(p), 4, 6).c_str(),
-						 (*U)->var_name(p));
-
-		}
-	fprintf( strm, "\n");
-}
-
diff --git a/upstream/src/libcn/model.hh b/upstream/src/libcn/model.hh
deleted file mode 100644
index 2dd17a8..0000000
--- a/upstream/src/libcn/model.hh
+++ /dev/null
@@ -1,711 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-02
- *
- * Class CModel
- */
-
-/*--------------------------------------------------------------------------
-
-The wrapper class that takes lists of pointers to neurons and synapses
-networked into a neural system, assembles a common state vector, and
-handles the derivatives. At the same time, it serves the neurons and
-synapses their state at any given time and lets them adjust their
-parameters.
-
---------------------------------------------------------------------------*/
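-
-/* Life-cycle sketch; the CIntegrateRK65 constructor arguments are an
- * assumption, the rest follows the declarations below:
- *
- *   CModel model( "m", new CIntegrateRK65(), 0);
- *   model.import_NetworkML( "network.xml");   // populate units
- *   model.advance( 1000.);                    // run 1000 msec
- *   model.dump_metrics();
- */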
-
-
-#ifndef LIBCN_MODEL_H
-#define LIBCN_MODEL_H
-
-#include <csignal>
-#include <list>
-#include <vector>
-#include <string>
-
-#include "libxml/parser.h"
-#include "libxml/tree.h"
-
-#include "gsl/gsl_rng.h"
-
-#include "base-neuron.hh"
-#include "base-synapse.hh"
-#include "hosted-neurons.hh"
-#include "hosted-synapses.hh"
-#include "standalone-neurons.hh"
-#include "standalone-synapses.hh"
-#include "integrate-rk65.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-namespace cnrun {
-
-// CModel _status bits
-#define CN_MDL_LOGDT		(1 << 0)
-#define CN_MDL_LOGSPIKERS	(1 << 1)
-#define CN_MDL_LOGUSINGID	(1 << 2)
-#define CN_MDL_SORTUNITS	(1 << 3)
-#define CN_MDL_ALLOWNOISE	(1 << 4)
-#define CN_MDL_NOTREADY		(1 << 5)
-#define CN_MDL_DISKLESS		(1 << 6)
-#define CN_MDL_HAS_DDTB_UNITS	(1 << 7)
-#define CN_MDL_DISPLAY_PROGRESS_PERCENT	(1 << 8)
-#define CN_MDL_DISPLAY_PROGRESS_TIME	(1 << 9)
-#define CN_MDL_DONT_COALESCE		(1 << 10)
-
-
-class CModel {
-
-    public:
-	string	name;
-
-    private:
-	int	_status;
-    public:
-	int status()	{ return _status; }
-
-      // structure ------
-    friend class C_BaseSynapse;
-    friend class C_HostedNeuron;
-    friend class C_HostedConductanceBasedNeuron;
-    friend class C_HostedRateBasedNeuron;
-    friend class C_HostedSynapse;
-    friend class CNeuronMap;
-    friend class CSynapseMap;
-
-    public:
-	size_t units() const __attribute__ ((pure))
-		{ return unit_list.size(); }
-
-    private:
-	unsigned long
-		_global_unit_id_reservoir;
-    private:
-	list<C_BaseUnit*>		unit_list; // all units together
-      // these have derivative(), are churned in _integrator->cycle()
-	list<C_HostedNeuron*>		hosted_neu_list;
-	list<C_HostedSynapse*>	hosted_syn_list;
-      // these need preadvance() and fixate()
-	list<C_StandaloneNeuron*>	standalone_neu_list;
-	list<C_StandaloneSynapse*>	standalone_syn_list;
-      // ... also these, but at discrete dt only
-      // (only the standalone map units currently)
-	list<C_StandaloneNeuron*>	ddtbound_neu_list;
-	list<C_StandaloneSynapse*>	ddtbound_syn_list;
-
-      // neurons that can possibly_fire() (various oscillators), which
-      // have no inputs and hence do not depend on anything else
-	list<C_BaseNeuron*>		conscious_neu_list;
-
-      // various lists to avoid traversing all of them in unit_list:
-      // listeners, spikeloggers & readers
-	list<C_BaseUnit*>		lisn_unit_list;
-      // uses a meaningful do_spikelogging_or_whatever
-	list<C_BaseNeuron*>		spikelogging_neu_list;
-      // `Multiplexing AB' synapses are treated very specially
-	list<C_BaseSynapse*>		mx_syn_list;
-
-      // those for which apprise_from_source( model_time()) will be called
-	list<C_BaseUnit*>		units_with_continuous_sources;
-      // same, but not every cycle
-	list<C_BaseUnit*>		units_with_periodic_sources;
-	list<double>		regular_periods;
-	list<unsigned>		regular_periods_last_checked;
-
-    public:
-	C_BaseUnit    *unit_by_label( const char *) const __attribute__ ((pure));
-	C_BaseNeuron  *neuron_by_label( const char *) const __attribute__ ((pure));
-	C_BaseSynapse *synapse_by_label( const char *) const __attribute__ ((pure));
-
-	size_t hosted_unit_cnt() const __attribute__ ((pure))
-		{  return hosted_neu_list.size() + hosted_syn_list.size();		}
-	size_t standalone_unit_cnt() const __attribute__ ((pure))
-		{  return standalone_neu_list.size() + standalone_syn_list.size();	}
-	size_t ddtbound_unit_cnt() const __attribute__ ((pure))
-		{  return ddtbound_neu_list.size() + ddtbound_syn_list.size();		}
-	size_t total_neuron_cnt() const __attribute__ ((pure))
-		{  return hosted_neu_list.size()
-				+ standalone_neu_list.size()
-				+ ddtbound_neu_list.size();		}
-	size_t total_synapse_cnt() const __attribute__ ((pure))
-		{  return hosted_syn_list.size()
-				+ standalone_syn_list.size()
-				+ ddtbound_syn_list.size();		}
-	size_t conscious_neuron_cnt() const __attribute__ ((pure))
-		{  return conscious_neu_list.size();	}
-	size_t listening_unit_cnt() const __attribute__ ((pure))
-		{  return lisn_unit_list.size();	}
-	size_t spikelogging_neuron_cnt() const __attribute__ ((pure))
-		{  return spikelogging_neu_list.size();	}
-
-	size_t unit_with_continuous_sources_cnt() const __attribute__ ((pure))
-		{  return units_with_continuous_sources.size();	}
-	size_t unit_with_periodic_sources_cnt() const __attribute__ ((pure))
-		{  return units_with_periodic_sources.size();	}
-
-      // if is_last == true, do allocations of hosted units' vars immediately
-      // otherwise defer until addition is done with is_last == true
-      // or the user calls finalize_additions
-	int include_unit( C_HostedNeuron*, bool is_last = true);
-	int include_unit( C_HostedSynapse*, bool is_last = true);
-	int include_unit( C_StandaloneNeuron*);
-	int include_unit( C_StandaloneSynapse*);
-
-	C_BaseNeuron *add_neuron_species( TUnitType type, const char *label, bool finalize = true,
-					  double x = 0., double y = 0., double z = 0.);
-	C_BaseNeuron *add_neuron_species( const char *type, const char *label, bool finalize = true,
-					  double x = 0., double y = 0., double z = 0.);
-	C_BaseSynapse *add_synapse_species( const char *type, const char *src_l, const char *tgt_l,
-					    double g, bool allow_clone = true, bool finalize = true);
-	void finalize_additions();
-    private:
-	C_BaseSynapse *add_synapse_species( TUnitType type, C_BaseNeuron *src, C_BaseNeuron *tgt,
-					    double g, bool allow_clone, bool finalize);
-	void _include_base_unit( C_BaseUnit*);
-//	int _check_new_synapse( C_BaseSynapse*);
-    public:
-	C_BaseUnit* exclude_unit( C_BaseUnit*, bool do_delete = false);
-	// return nullptr if do_delete == true, the excluded unit otherwise, even if it was not owned
-	void delete_unit( C_BaseUnit* u)
-		{  exclude_unit( u, true);  }
-    private:
-	friend class C_BaseUnit;
-	void register_listener( C_BaseUnit*);
-	void unregister_listener( C_BaseUnit*);
-	friend class C_BaseNeuron;
-	friend class SSpikeloggerService;
-	void register_spikelogger( C_BaseNeuron*);
-	void unregister_spikelogger( C_BaseNeuron*);
-	void register_mx_synapse( C_BaseSynapse*);
-	void unregister_mx_synapse( C_BaseSynapse*);
-
-	void register_unit_with_sources( C_BaseUnit*);
-	void unregister_unit_with_sources( C_BaseUnit*);
-
-    private:
-	unsigned short	_longest_label;
-    public:
-	unsigned short longest_label()  { return _longest_label; }
-
-    public:
-      // ctor, dtor
-	CModel( const char *inname, CIntegrate_base *inRK65Setup, int instatus);
-       ~CModel();
-
-	void reset( bool also_reset_params = false);
-	void reset_state_all_units();
-
-    public:
-      // NeuroMl interface
-	int import_NetworkML( const char *fname, bool appending = false);
-	int import_NetworkML( xmlDoc *doc, const char *fname, bool appending = false);  // fname is merely informational here
-	void cull_deaf_synapses();  // those with gsyn == 0
-	void cull_blind_synapses(); // those with _source == nullptr
-	int export_NetworkML( const char *fname);
-	int export_NetworkML( xmlDoc *doc);
-	void dump_metrics( FILE *strm = stdout);
-	void dump_state( FILE *strm = stdout);
-	void dump_units( FILE *strm = stdout);
-    private:
-	int _process_populations( xmlNode*);
-	int _process_population_instances( xmlNode*, const xmlChar*, const xmlChar*);
-
-	int _process_projections( xmlNode*);
-	int _process_projection_connections( xmlNode*, const xmlChar*, const xmlChar*,
-					     const xmlChar *src_grp_prefix, const xmlChar *tgt_grp_prefix);
-
-      // the essential mechanical parts: ----
-      // hosted unit variables
-    private:
-	vector<double> V,	// contains catenated var vectors of all constituent neurons and synapses
-		       W;	// V and W alternate in the capacity of the main vector, so avoiding many a memcpy
-	size_t	_var_cnt;	// total # of variables (to be) allocated in V and W, plus one for model_time
-    public:
-	size_t vars()			{ return _var_cnt;         }
-
-      // integrator interface
-    private:
-	friend class CIntegrate_base;
-	friend class CIntegrateRK65;
-    public:
-	CIntegrate_base
-		*_integrator;
-	const double& model_time() const		{ return V[0];            }
-
-	double& dt() const				{ return _integrator->dt;	}
-	double& dt_min() const				{ return _integrator->_dt_min;	}
-	double& dt_max() const				{ return _integrator->_dt_max;	}
-      // this one is internal
-	friend class CSynapseMxAB_dd;
-    private:
-	const double& model_time( vector<double> &x)	{ return x[0];            }
-
-    private:
-	unsigned long
-		_cycle;
-	double	_discrete_time;
-	double	_discrete_dt;
-    public:
-	unsigned long cycle()				{ return _cycle;          }
-	const double& model_discrete_time()		{ return _discrete_time;  }
-	const double& discrete_dt()			{ return _discrete_dt;    }
-
-      // simulation
-    private:
-	void _setup_schedulers();
-	void prepare_advance();
-//	void ctrl_c_handler( int);
-	unsigned _do_advance_on_pure_hosted( double, double*)  __attribute__ ((hot));
-	unsigned _do_advance_on_pure_standalone( double, double*) __attribute__ ((hot));
-	unsigned _do_advance_on_pure_ddtbound( double, double*) __attribute__ ((hot));
-	unsigned _do_advance_on_mixed( double, double*) __attribute__ ((hot));
-    public:
-	unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
-
-    public:
-	double	spike_threshold, // above which neurons will detect a spike
-		spike_lapse;  // if occurs less than this after the unit's _last_spike_end
-	// (begs to be moved to SSpikeloggerService)
-
-    public:
-	float	listen_dt;
-    private:
-	ofstream
-		*_dt_logger, *_spike_logger;
-
-    public:
-      // high-level functions to manipulate unit behaviour, set params, & connect sources
-	struct STagGroup {
-		string pattern;
-		bool enable;
-		STagGroup( const char *a, bool b = true)
-		      : pattern (a), enable (b) {}
-	};
-	struct STagGroupListener : STagGroup {
-		int bits;
-		STagGroupListener( const char *a, bool b, int c = 0)
-		      : STagGroup (a, b), bits (c) {}
-	};
-	int process_listener_tags( const list<STagGroupListener>&);
-
-	struct STagGroupSpikelogger : STagGroup {
-		double period, sigma, from;
-		STagGroupSpikelogger( const char *a, bool b,
-				      double c = 0., double d = 0., double e = 0.)  // defaults disable sdf computation
-		      : STagGroup (a, b), period (c), sigma (d), from (e) {}
-	};
-	int process_spikelogger_tags( const list<STagGroupSpikelogger>&);
-
-	int process_putout_tags( const list<STagGroup>&);
-
-	struct STagGroupDecimate : STagGroup {
-		float fraction;
-		STagGroupDecimate( const char *a, double c)
-		      : STagGroup (a), fraction (c) {}
-	};
-	int process_decimate_tags( const list<STagGroupDecimate>&);
-
-	struct STagGroupNeuronParmSet : STagGroup {
-		string parm;
-		double value;
-		STagGroupNeuronParmSet( const char *a, bool b, const char *c, double d)  // b == false to revert to stock
-		      : STagGroup (a, b), parm (c), value (d)
-			{}
-	};
-	struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
-		string target;
-		STagGroupSynapseParmSet( const char *a, const char *z, bool b, const char *c, double d)
-		      : STagGroupNeuronParmSet (a, b, c, d), target (z)
-			{}
-	};
-	int process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
-	int process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
-    private:
-	void coalesce_synapses();  // those which used to be clones then made independent
-
-    public:
-	struct STagGroupSource : STagGroup {
-		string parm;
-		C_BaseSource *source;
-		STagGroupSource( const char *a, bool b, const char *c, C_BaseSource *d)  // b == false to revert to stock
-		      :  STagGroup (a, b), parm (c), source (d)
-			{}
-	};
-	int process_paramset_source_tags( const list<STagGroupSource>&);
-
-	list<C_BaseSource*> Sources;
-	C_BaseSource* source_by_id( const char *id)
-		{
-			auto K = Sources.begin();
-			while ( K != Sources.end() ) {
-				if ( (*K)->name == id )
-					return *K;
-				K++;
-			}
-			return nullptr;
-		}
-
-    public:
-	int	verbosely;
-
-	gsl_rng	*_rng;
-
-	double rng_sample()
-		{
-			return gsl_rng_uniform_pos( _rng);
-		}
-
-      // various convenience fields and members
-	typedef list<C_BaseUnit*>::iterator lBUi;
-	typedef list<C_BaseUnit*>::const_iterator lBUci;
-	typedef list<C_BaseUnit*>::reverse_iterator lBUri;
-	typedef list<C_BaseUnit*>::const_reverse_iterator lBUcri;
-
-	lBUi  ulist_begin()	{ return unit_list.begin();		}
-	lBUi  ulist_end()	{ return unit_list.end();		}
-	lBUri ulist_rbegin()	{ return unit_list.rbegin();		}
-	lBUri ulist_rend()	{ return unit_list.rend();		}
-
-	lBUi lulist_begin()	{ return lisn_unit_list.begin();	}
-	lBUi lulist_end()	{ return lisn_unit_list.end();		}
-	list<C_BaseNeuron*>::		iterator knlist_begin()	{ return spikelogging_neu_list.begin();	}
-	list<C_BaseNeuron*>::		iterator knlist_end()	{ return spikelogging_neu_list.end();	}
-
-	// lBUi rlist_rbegin()	{ return reader_unit_list.rbegin();	}
-	// lBUi rlist_rend()	{ return reader_unit_list.rend();	}
-};
-
-
-
-
-// by popular demand
-#define for_all_units(U) \
-	for ( auto U = ulist_begin(); U != ulist_end(); ++U )
-#define for_all_units_const(U) \
-	for ( auto U = unit_list.begin(); U != unit_list.end(); ++U )
-#define for_all_neurons(U) \
-	for ( auto U = ulist_begin(); U != ulist_end(); ++U ) if ( (*U)->is_neuron() )
-#define for_all_synapses(U) \
-	for ( auto U = ulist_begin(); U != ulist_end(); ++U ) if ( (*U)->is_synapse() )
-#define for_all_neurons_reversed(U) \
-	for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U ) if ( (*U)->is_neuron() )
-#define for_all_synapses_reversed(U) \
-	for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U ) if ( (*U)->is_synapse() )
-
-#define for_all_hosted_neurons(U) \
-	for ( auto U = hosted_neu_list.begin(); U != hosted_neu_list.end(); ++U )
-#define for_all_hosted_synapses(U) \
-	for ( auto U = hosted_syn_list.begin(); U != hosted_syn_list.end(); ++U )
-#define for_all_standalone_neurons(U) \
-	for ( auto U = standalone_neu_list.begin(); U != standalone_neu_list.end(); ++U )
-#define for_all_standalone_synapses(U) \
-	for ( auto U = standalone_syn_list.begin(); U != standalone_syn_list.end(); ++U )
-#define for_all_ddtbound_neurons(U) \
-	for ( auto U = ddtbound_neu_list.begin(); U != ddtbound_neu_list.end(); ++U )
-#define for_all_ddtbound_synapses(U) \
-	for ( auto U = ddtbound_syn_list.begin(); U != ddtbound_syn_list.end(); ++U )
-
-#define for_all_units_with_contiuous_sources(U) \
-	for ( auto U = units_with_continuous_sources.begin(); U != units_with_continuous_sources.end(); ++U )
-#define for_all_units_with_periodic_sources(U) \
-	for ( auto U = units_with_periodic_sources.begin(); U != units_with_periodic_sources.end(); ++U )
-
-#define for_all_units_reversed(U) \
-	for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U )
-#define for_all_readers_reversed(U) \
-	for ( auto U = rlist_rbegin(); U != rlist_rend(); ++U )
-
-#define for_all_hosted_neurons_reversed(U)       for ( auto U = hosted_neu_list.rbegin(); U != hosted_neu_list.rend(); ++U )
-#define for_all_hosted_synapses_reversed(U)      for ( auto U = hosted_syn_list.rbegin(); U != hosted_syn_list.rend(); ++U )
-#define for_all_standalone_synapses_reversed(U)  for ( auto U = standalone_syn_list.rbegin(); U != standalone_syn_list.rend(); ++U )
-
-#define for_all_listening_units(U) \
-	for ( auto U = lulist_begin(); U != lulist_end(); ++U )
-#define for_all_conscious_neurons(N) \
-	for ( auto N = conscious_neu_list.begin(); N != conscious_neu_list.end(); ++N )
-#define for_all_spikelogging_neurons(N)	\
-	for ( auto N = knlist_begin(); N != knlist_end(); ++N )
-#define for_all_mx_synapses(N)	\
-	for ( auto Y = mx_syn_list.begin(); Y != mx_syn_list.end(); ++Y )
-
-
-#define for_model_units(M,U) \
-	for ( auto U = M->ulist_begin(); U != M->ulist_end(); ++U )
-#define for_model_units_reversed(M,U)  \
-	for ( auto U = M->ulist_rbegin(); U != M->ulist_rend(); ++U )
-
-#define for_model_hosted_neurons(M,U)  \
-	for ( auto U = M->hosted_neu_list.begin(); U != M->hosted_neu_list.end(); ++U )
-#define for_model_hosted_synapses(M,U)  \
-	for ( auto U = M->hosted_syn_list.begin(); U != M->hosted_syn_list.end(); ++U )
-#define for_model_hosted_neurons_reversed(M,U)   for ( auto U = M->hnlist_rbegin(); U != M->hnlist_rend(); ++U )
-#define for_model_hosted_synapses_reversed(M,U)  for ( auto U = M->hylist_rbegin(); U != M->hylist_rend(); ++U )
-
-#define for_model_standalone_neurons(M,U)   for ( auto U = M->snlist_begin(); U != M->snlist_end(); ++U )
-#define for_model_standalone_synapses(M,U)  for ( auto U = M->sylist_begin(); U != M->sylist_end(); ++U )
-#define for_model_standalone_neurons_reversed(M,U)   for ( auto U = M->snlist_rbegin(); U != M->snlist_rend(); ++U )
-#define for_model_standalone_synapses_reversed(M,U)  for ( auto U = M->sylist_rbegin(); U != M->sylist_rend(); ++U )
-
-#define for_model_neuron_units(M,U)   for_model_units(M,U) if ( (*U)->is_neuron() )
-#define for_model_synapse_units(M,U)  for_model_units(M,U) if ( (*U)->is_synapse() )
-
-#define for_model_spikelogging_neurons(M,N)	for ( auto N = M->knlist_begin(); N != M->knlist_end(); ++N )
-
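-// Usage sketch (count_neurons is a hypothetical CModel method, shown for
-// illustration only):
-//	size_t CModel::count_neurons()
-//	{
-//		size_t n = 0;
-//		for_all_neurons (U)
-//			++n;
-//		return n;
-//	}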
-
-// return values for import_NetworkML
-#define CN_NMLIN_NOFILE		-1
-#define CN_NMLIN_NOELEM		-2
-#define CN_NMLIN_BADATTR	-3
-#define CN_NMLIN_BADCELLTYPE	-4
-#define CN_NMLIN_BIGLABEL	-5
-#define CN_NMLIN_STRUCTERROR	-6
-
-
-
-
-
-
-inline void
-CIntegrateRK65::fixate()
-{
-	swap( model->V, model->W);
-}
-
-
-// various CUnit & CNeuron methods accessing CModel members
-// that we want to have inline
-
-inline const double&
-C_BaseUnit::model_time() const
-{
-	return M->model_time();
-}
-
-inline void
-C_BaseUnit::pause_listening()
-{
-	if ( M )
-		M->unregister_listener( this);
-}
-
-inline void
-C_BaseUnit::resume_listening()
-{
-	if ( M )
-		M->register_listener( this);
-}
-
-
-
-template <class T>
-void
-C_BaseUnit::attach_source( T *s, TSinkType t, unsigned short idx)
-{
-	sources.push_back( SSourceInterface<T>( s, t, idx));
-	M->register_unit_with_sources(this);
-}
-
-
-
-
-
-inline SSpikeloggerService*
-C_BaseNeuron::enable_spikelogging_service( int s_mask)
-{
-	if ( !_spikelogger_agent )
-		_spikelogger_agent = new SSpikeloggerService( this, s_mask);
-	M->register_spikelogger( this);
-	return _spikelogger_agent;
-}
-inline SSpikeloggerService*
-C_BaseNeuron::enable_spikelogging_service( double sample_period, double sigma, double from, int s_mask)
-{
-	if ( !_spikelogger_agent )
-		_spikelogger_agent = new SSpikeloggerService( this, sample_period, sigma, from, s_mask);
-	M->register_spikelogger( this);
-	return _spikelogger_agent;
-}
-
-inline void
-C_BaseNeuron::disable_spikelogging_service()
-{
-	if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_PERSIST)) {
-		_spikelogger_agent->sync_history();
-		M->unregister_spikelogger( this);
-
-		delete _spikelogger_agent;
-		_spikelogger_agent = nullptr;
-	}
-}
-
-
-
-
-
-
-inline void
-C_HostedNeuron::reset_vars()
-{
-//	cout << "reset_vars() on " << label << " (idx " << idx << ")\n";
-	if ( M && idx < M->_var_cnt )
-		memcpy( &M->V[idx],
-			__CNUDT[_type].stock_var_values,
-			__CNUDT[_type].vno * sizeof(double));
-}
-
-inline double&
-C_HostedNeuron::var_value( size_t v)
-{
-	return M->V[idx + v];
-}
-
-inline const double&
-C_HostedNeuron::get_var_value( size_t v) const
-{
-	return M->V[idx + v];
-}
-
-
-
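-// Spike counting per dt: conductance-based units report one spike iff E() is
-// at or above the model's spike_threshold; rate-based units treat E() as an
-// instantaneous rate, scaling the expected count E()*dt by a uniform sample
-// before rounding.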
-inline unsigned
-C_HostedConductanceBasedNeuron::n_spikes_in_last_dt() const
-{
-	return E() >= M->spike_threshold;
-}
-
-inline unsigned
-C_HostedRateBasedNeuron::n_spikes_in_last_dt() const
-{
-	return round(E() * M->dt() * M->rng_sample());
-}
-
-
-inline unsigned
-C_StandaloneConductanceBasedNeuron::n_spikes_in_last_dt() const
-{
-	return E() >= M->spike_threshold;
-}
-
-inline unsigned
-C_StandaloneRateBasedNeuron::n_spikes_in_last_dt() const
-{
-	return round(E() * M->dt() * M->rng_sample());
-}
-
-
-
-
-
-
-
-
-inline void
-C_HostedSynapse::reset_vars()
-{
-//	cout << "reset_vars() on " << label << " (idx " << idx << ")\n";
-	if ( M && M->_var_cnt > idx )
-		memcpy( &M->V[idx],
-			__CNUDT[_type].stock_var_values,
-			__CNUDT[_type].vno * sizeof(double));
-}
-
-
-
-inline double&
-C_HostedSynapse::var_value( size_t v)
-{
-	return M->V[idx + v];
-}
-
-inline const double&
-C_HostedSynapse::get_var_value( size_t v) const
-{
-	return M->V[idx + v];
-}
-
-
-
-inline double
-C_HostedConductanceBasedNeuron::E() const
-{
-	return M->V[idx+0];
-}
-
-// F is, as a rule, computed on the fly rather than stored as a variable
-
-
-inline double
-C_HostedSynapse::S() const
-{
-	return M->V[idx+0];
-}
-
-
-
-
-
-
-inline
-CNeuronMap::CNeuronMap( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask)
-      : C_StandaloneConductanceBasedNeuron( NT_MAP, inlabel, x, y, z, inM, s_mask)
-{
-	if ( inM ) {
-		if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
-			printf( "Inappropriate discrete dt\n");
-			_status |= CN_UERROR;
-		}
-		inM -> _discrete_dt = fixed_dt;
-	}
-}
-
-
-inline
-CSynapseMap::CSynapseMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			  double ing, CModel *inM, int s_mask, TUnitType alt_type)
-      : C_StandaloneSynapse( alt_type, insource, intarget, ing, inM, s_mask),
-	_source_was_spiking (false)
-{
-	if ( !inM )
-		fprintf( stderr, "A MxMap synapse is created unattached to a model: preadvance() will cause a segfault!\n");
-	else {
-		if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
-			printf( "Inappropriate discrete dt\n");
-			_status |= CN_UERROR;
-		}
-		inM -> _discrete_dt = fixed_dt;
-	}
-}
-
-
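-// S decays exponentially with time constant tau, plus a delta increment
-// whenever the presynaptic unit spiked in the last dt.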
-inline void
-CSynapseMap::preadvance()
-{
-//	printf( "fafa %s\n", label);
-	V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_])
-		+ (_source->n_spikes_in_last_dt() ? P[_delta_] : 0);
-
-//	V_next[1] = ;
-}
-
-
-
-inline void
-CSynapseMxMap::preadvance()
-{
-	V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_]) + q() * P[_delta_];
-}
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/mx-attr.hh b/upstream/src/libcn/mx-attr.hh
deleted file mode 100644
index 3ecb3e0..0000000
--- a/upstream/src/libcn/mx-attr.hh
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-03-31
- *
- */
-
-
-#ifndef LIBCN_MX_ATTR_H
-#define LIBCN_MX_ATTR_H
-
-#include <vector>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-using namespace std;
-
-namespace cnrun {
-
-class C_MultiplexingAttributes {
-    protected:
-	friend class CModel;
-	virtual void update_queue() = 0;
-	vector<double> _kq;
-    public:
-	double  q() const	{ return _kq.size(); }
-	void reset()
-		{
-			_kq.clear();
-		}
-};
-
-
-
-class C_DotAttributes {
-    public:
-	virtual double& spikes_fired_in_last_dt() = 0;
-};
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/param-unit-literals.hh b/upstream/src/libcn/param-unit-literals.hh
deleted file mode 100644
index 6bbfa5f..0000000
--- a/upstream/src/libcn/param-unit-literals.hh
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-04-11
- *
- */
-
-#ifndef LIBCN_PARAM_UNIT_LITERALS_H
-#define LIBCN_PARAM_UNIT_LITERALS_H
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-#define __CN_PU_CONDUCTANCE "\316\274S/cm\302\262"
-#define __CN_PU_RESISTANCE "M\316\251"
-#define __CN_PU_POTENTIAL "mV"
-#define __CN_PU_VOLTAGE "mV"
-#define __CN_PU_CURRENT "nA"
-#define __CN_PU_CAPACITY_DENSITY "\316\274F/cm\302\262"
-#define __CN_PU_TIME "msec"
-#define __CN_PU_TIME_MSEC "msec"
-#define __CN_PU_RATE "1/msec"
-#define __CN_PU_FREQUENCY "Hz"
-#define __CN_PU_TIME_SEC "sec"
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/sources.cc b/upstream/src/libcn/sources.cc
deleted file mode 100644
index 78ad7b2..0000000
--- a/upstream/src/libcn/sources.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2010-02-24
- *
- */
-
-
-#include <cmath>
-#include <sys/time.h>
-#include <iostream>
-#include <fstream>
-#include <limits>
-
-#include "sources.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-using namespace std;
-
-const char * const cnrun::__SourceTypes[] = {
-	"Null",
-	"Tape",
-	"Periodic",
-	"Function",
-	"Noise",
-};
-
-
-cnrun::CSourceTape::
-CSourceTape( const char *id, const char *infname, bool inis_looping)
-      : C_BaseSource (id, SRC_TAPE), is_looping (inis_looping)
-{
-	ifstream ins( infname);
-	if ( !ins.good() ) {
-		name = "";
-		return;
-	}
-	skipws(ins);
-
-	while ( !ins.eof() && ins.good() ) {
-		while ( ins.peek() == '#' || ins.peek() == '\n' )
-			ins.ignore( numeric_limits<streamsize>::max(), '\n');
-		double timestamp, datum;
-		if ( !(ins >> timestamp >> datum) )  // don't push a stale pair when extraction fails at eof
-			break;
-		values.push_back( pair<double,double>(timestamp, datum));
-	}
-
-	if ( values.size() == 0 ) {
-		fprintf( stderr, "No usable values in \"%s\"\n", infname);
-		return;
-	}
-
-	fname = infname;
-	I = values.begin();
-}
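-
-// Tape file format, as parsed above: '#'-comment and blank lines are
-// skipped; every other line holds a "<timestamp> <value>" pair.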
-
-double
-cnrun::CSourceTape::
-operator() ( double t)
-{
-      // position I such that *I < t < *(I+1)
-	while ( I+1 != values.end() && (I+1)->first < t )
-		++I;
-
-	if ( I+1 == values.end() && is_looping )
-		I = values.begin();
-
-	return I->second;
-}
-
-
-
-
-
-
-cnrun::CSourcePeriodic::
-CSourcePeriodic( const char *id, const char *infname, bool inis_looping, double inperiod)
-      : C_BaseSource (id, SRC_PERIODIC), is_looping (inis_looping)
-{
-	ifstream ins( infname);
-	if ( !ins.good() ) {
-		name = "";
-		return;
-	}
-	skipws(ins);
-
-	while ( ins.peek() == '#' || ins.peek() == '\n' )
-		ins.ignore( numeric_limits<streamsize>::max(), '\n');
-
-	if ( !isfinite(inperiod) || inperiod <= 0. ) {
-		ins >> inperiod;
-		if ( !isfinite(inperiod) || inperiod <= 0. ) {
-			fprintf( stderr, "Bad interval for \"%s\"\n", infname);
-			name = "";
-			return;
-		}
-	}
-	period = inperiod;
-
-	while ( true ) {
-		while ( ins.peek() == '#' || ins.peek() == '\n' )
-			ins.ignore( numeric_limits<streamsize>::max(), '\n');
-		double datum;
-		ins >> datum;
-		if ( ins.eof() || !ins.good() )
-			break;
-		values.push_back( datum);
-	}
-
-	if ( values.size() < 2 ) {
-		fprintf( stderr, "Need at least 2 scheduled values in \"%s\"\n", infname);
-		name = "";
-		return;
-	}
-
-	fname = infname;
-}
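-
-// Periodic-source file format, as parsed above: '#'-comment and blank lines
-// are skipped; the period comes first unless a positive one was passed to
-// the ctor, followed by at least two values, one per period.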
-
-
-
-const char * const cnrun::distribution_names[] = { "uniform", "gaussian" };
-
-cnrun::CSourceNoise::
-CSourceNoise( const char *id,
-	      double in_min, double in_max,
-	      TDistribution indist_type,
-	      int seed)
-      : C_BaseSource (id, SRC_NOISE), _min (in_min), _max (in_max), _sigma (in_max - in_min), dist_type (indist_type)
-{
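-	// n.b.: the seed parameter is not used; when gsl_rng_default_seed is
-	// unset, seeding falls back to gettimeofday() microseconds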
-	const gsl_rng_type *T;
-	gsl_rng_env_setup();
-	T = gsl_rng_default;
-	if ( gsl_rng_default_seed == 0 ) {
-		struct timeval tp = { 0L, 0L };
-		gettimeofday( &tp, nullptr);
-		gsl_rng_default_seed = tp.tv_usec;
-	}
-	rng = gsl_rng_alloc( T);
-}
-
-
-cnrun::CSourceNoise::
-~CSourceNoise()
-{
-	gsl_rng_free( rng);
-}
-
-
-// eof
diff --git a/upstream/src/libcn/sources.hh b/upstream/src/libcn/sources.hh
deleted file mode 100644
index 6b8ec1b..0000000
--- a/upstream/src/libcn/sources.hh
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2010-02-24
- *
- */
-
-
-#ifndef LIBCN_SOURCES_H
-#define LIBCN_SOURCES_H
-
-#include <string>
-#include <vector>
-
-#include "gsl/gsl_rng.h"
-#include "gsl/gsl_randist.h"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-namespace cnrun {
-
-
-typedef enum { SRC_NULL, SRC_TAPE, SRC_PERIODIC, SRC_FUNCTION, SRC_NOISE } TSourceType;
-extern const char * const __SourceTypes[];
-
-class C_BaseSource {
-    public:
-	string name;
-	TSourceType type;
-	C_BaseSource( const char *id, TSourceType intype = SRC_NULL)
-	      : name (id), type (intype)
-		{}
-	virtual ~C_BaseSource()
-		{}
-
-	virtual double operator() ( double)
-		{  return 0.;  }
-	virtual bool is_periodic()
-		{  return false;  }
-	bool operator== ( const C_BaseSource &rv)
-		{  return name == rv.name; }
-	bool operator== ( const char *rv)
-		{  return name == rv;  }
-	virtual void dump( FILE *strm = stdout)
-		{
-			fprintf( strm, "%s (%s)\n", name.c_str(), __SourceTypes[type]);
-		}
-};
-
-
-
-class CSourceTape : public C_BaseSource {
-    private:
-	CSourceTape();
-    public:
-	string fname;
-	vector< pair<double, double> > values;
-	bool is_looping;
-
-	CSourceTape( const char *id, const char *infname, bool is_looping = false);
-
-	double operator() ( double at);
-
-	void dump( FILE *strm = stdout)
-		{
-			fprintf( strm, "%s (%s) %zu values from %s%s\n",
-				 name.c_str(), __SourceTypes[type],
-				 values.size(), fname.c_str(), is_looping ? " (looping)" : "");
-		}
-    private:
-	vector< pair<double, double> >::iterator I;
-};
-
-
-class CSourcePeriodic : public C_BaseSource {
-    private:
-	CSourcePeriodic();
-    public:
-	string fname;
-	vector<double> values;
-	double period;
-	bool is_looping;
-
-	CSourcePeriodic( const char *id, const char *fname, bool is_looping = false, double period = 0.);
-
-	double operator() ( double at)
-		{
-			size_t	i_abs = (size_t)(at / period),
-				i_eff = is_looping
-					? i_abs % values.size()
-					: min (i_abs, values.size() - 1);
-			return values[i_eff];
-		}
-
-	void dump( FILE *strm = stdout)
-		{
-			fprintf( strm, "%s (%s) %zu values at %g from %s%s\n",
-				 name.c_str(), __SourceTypes[type],
-				 values.size(), period, fname.c_str(), is_looping ? " (looping)" : "");
-		}
-
-	bool is_periodic()
-		{  return true;  }
-};
-
-
-class CSourceFunction : public C_BaseSource {
-    private:
-	CSourceFunction();
-    public:
-	double (*function)( double at);
-
-	CSourceFunction( const char *id, double (*f)(double))
-	      : C_BaseSource (id, SRC_FUNCTION), function (f)
-		{}
-
-	double operator() ( double at)
-		{
-			return function( at);
-		}
-};
-
-
-extern  const char * const distribution_names[];
-
-class CSourceNoise : public C_BaseSource {
-    private:
-	CSourceNoise();
-    public:
-	double _min, _max, _sigma;
-	enum TDistribution {
-		SOURCE_RANDDIST_UNIFORM,
-		SOURCE_RANDDIST_GAUSSIAN,
-	};
-	TDistribution dist_type;
-	gsl_rng	*rng;
-
-	CSourceNoise( const char *id, double in_min = 0., double in_max = 1.,
-		      TDistribution type = SOURCE_RANDDIST_UNIFORM,
-		      int seed = 0);
-       ~CSourceNoise();
-
-	double operator() ( double unused) const
-		{
-			switch ( dist_type ) {
-			case SOURCE_RANDDIST_UNIFORM:	return gsl_rng_uniform( rng) * (_max - _min) + _min;
-			case SOURCE_RANDDIST_GAUSSIAN:	return gsl_ran_gaussian( rng, _sigma) + (_min + _max)/2;  // centred at the range midpoint
-			default:			return 42.;
-			}
-		}
-
-	void dump( FILE *strm = stdout)
-		{
-			fprintf( strm, "%s (%s) %s in range %g:%g (sigma = %g)\n",
-				 name.c_str(), __SourceTypes[type],
-				 distribution_names[dist_type], _min, _max, _sigma);
-		}
-};
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/standalone-attr.hh b/upstream/src/libcn/standalone-attr.hh
deleted file mode 100644
index 832ae7a..0000000
--- a/upstream/src/libcn/standalone-attr.hh
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-03-31
- *
- */
-
-
-
-#ifndef LIBCN_STANDALONE_ATTR_H
-#define LIBCN_STANDALONE_ATTR_H
-
-#include <vector>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-namespace cnrun {
-
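-// Standalone units double-buffer their state: preadvance() computes V_next,
-// and fixate() commits it to V at the end of each model step.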
-class C_StandaloneAttributes {
-
-    protected:
-	C_StandaloneAttributes()
-		{}
-	C_StandaloneAttributes( size_t nvars)
-		{
-			V.resize( nvars);  V_next.resize( nvars);
-		}
-
-	vector<double>	V, V_next;
-
-    friend class CModel;
-    private:
-	virtual void preadvance()
-		{}
-	void fixate()
-		{  V = V_next;  }
-};
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/standalone-neurons.cc b/upstream/src/libcn/standalone-neurons.cc
deleted file mode 100644
index ba8c751..0000000
--- a/upstream/src/libcn/standalone-neurons.cc
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-04-08
- *
- */
-
-
-#include <iostream>
-
-#include "standalone-neurons.hh"
-#include "param-unit-literals.hh"
-#include "types.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-
-cnrun::C_StandaloneNeuron::
-C_StandaloneNeuron( TUnitType intype, const char *inlabel,
-		    double x, double y, double z,
-		    CModel *inM, int s_mask)
-      : C_BaseNeuron( intype, inlabel, x, y, z, inM, s_mask),
-	C_StandaloneAttributes( __CNUDT[intype].vno)
-{
-	reset_vars();
-	if ( M )
-		M->include_unit( this);
-}
-
-
-cnrun::C_StandaloneNeuron::
-~C_StandaloneNeuron()
-{
-	if ( M && M->verbosely > 5 )
-		fprintf( stderr, " deleting standalone neuron \"%s\"\n", _label);
-}
-
-
-
-
-
-
-// --------- Rhythm'n'Blues
-
-const char* const cnrun::__CN_ParamNames_NeuronHH_r[] = {
-	"a, " __CN_PU_FREQUENCY,
-	"I\342\202\200, " __CN_PU_CURRENT,
-	"r in F(I) = a (I-I\342\202\200)^r",
-	"Externally applied DC, " __CN_PU_CURRENT,
-};
-const char* const cnrun::__CN_ParamSyms_NeuronHH_r[] = {
-	"a",
-	"I0",
-	"r",
-	"Idc",
-};
-const double cnrun::__CN_Params_NeuronHH_r[] = {
-	0.185,		//   a,
-	0.0439,		//   I0,
-	0.564,		//   r in F(I) = a * (I-I0)^r
-	0.		// Externally applied DC
-};
-
-
-const char* const cnrun::__CN_VarNames_NeuronHH_r[] = {
-	"Spiking rate, " __CN_PU_FREQUENCY,
-};
-const char* const cnrun::__CN_VarSyms_NeuronHH_r[] = {
-	"F",
-};
-const double cnrun::__CN_Vars_NeuronHH_r[] = {
-	 0.	// frequency F
-};
-
-
-double
-cnrun::CNeuronHH_r::
-F( vector<double>& x) const
-{
-	double subsq = Isyn(x) - P[_I0_] + P[_Idc_];
-	if ( subsq <= 0. )
-		return 0.;
-	else {
-		return P[_a_] * pow( subsq, P[_r_]);
-	}
-}
-
-void
-cnrun::CNeuronHH_r::
-preadvance()
-{
-	double subsq = Isyn() - P[_I0_] + P[_Idc_];
-//	printf( "%s->Isyn(x) = %g,\tsubsq = %g\n", _label, Isyn(), subsq);
-	if ( subsq <= 0. )
-		V_next[0] = 0;
-	else
-		V_next[0] = P[_a_] * pow( subsq, P[_r_]);
-}
-
-
-
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_OscillatorPoissonDot[] = {
-	"Rate \316\273, " __CN_PU_RATE,
-	"Resting potential, " __CN_PU_POTENTIAL,
-	"Potential when firing, " __CN_PU_POTENTIAL,
-};
-const char* const cnrun::__CN_ParamSyms_OscillatorPoissonDot[] = {
-	"lambda",
-	"Vrst",
-	"Vfir",
-};
-const double cnrun::__CN_Params_OscillatorPoissonDot[] = {
-	0.02,	// firing rate Lambda [1/ms]=[10^3 Hz]
-      -60.0,	// input neuron resting potential
-       20.0,	// input neuron potential when firing
-};
-
-const char* const cnrun::__CN_VarNames_OscillatorPoissonDot[] = {
-	"Membrane potential, " __CN_PU_POTENTIAL,
-	"Spikes recently fired",
-//	"Time"
-};
-const char* const cnrun::__CN_VarSyms_OscillatorPoissonDot[] = {
-	"E",
-	"nspk",
-//	"t"
-};
-const double cnrun::__CN_Vars_OscillatorPoissonDot[] = {
-	-60.,	// = Vrst, per initialization code found in ctor
-	  0,
-//	  0.
-};
-
-
-
-namespace {
-#define _THIRTEEN_ 13
-unsigned long __factorials[_THIRTEEN_] = {
-	1,
-	1, 2, 6, 24, 120,
-	720, 5040, 40320, 362880L, 3628800L,
-	39916800L, 479001600L
-};
-
-inline double
-__attribute__ ((pure))
-factorial( unsigned n)
-{
-	if ( n < _THIRTEEN_ )
-		return __factorials[n];
-	else
-	      // beyond the lookup table, fall back to the exact recurrence n! = n * (n-1)!
-		return n * factorial( n-1);
-}
-}
-
-void
-cnrun::COscillatorDotPoisson::
-possibly_fire()
-{
-	double	lt = P[_lambda_] * M->dt(),
-		dice = M->rng_sample(),
-		probk = 0.;
-
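-      // inverse-CDF sampling: accumulate Poisson(lt) pmf terms
-      // exp(-lt) * lt^k / k!  until the running sum exceeds a uniform draw;
-      // that k is the spike count for this dt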
-	unsigned k;
-	for ( k = 0; ; k++ ) {
-		probk += exp( -lt) * pow( lt, (double)k) / factorial(k);
-		if ( probk > dice ) {
-			nspikes() = k;
-			break;
-		}
-	}
-
-	if ( k ) {
-		_status |=  CN_NFIRING;
-		var_value(0) = P[_Vfir_];
-	} else {
-		_status &= ~CN_NFIRING;
-		var_value(0) = P[_Vrst_];
-	}
-}
-
-
-
-void
-cnrun::COscillatorDotPoisson::
-do_detect_spike_or_whatever()
-{
-	unsigned n = n_spikes_in_last_dt();
-	if ( n > 0 ) {
-		for ( unsigned qc = 0; qc < n; qc++ )
-			_spikelogger_agent->spike_history.push_back( model_time());
-		_spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
-		_spikelogger_agent->t_last_spike_start = _spikelogger_agent->t_last_spike_end = model_time();
-	} else
-		_spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
-}
-
-
-
-
-
-
-
-
-
-
-
-const char* const cnrun::__CN_ParamNames_OscillatorPoisson[] = {
-	"Rate \316\273, " __CN_PU_RATE,
-	"Input neuron resting potential, " __CN_PU_POTENTIAL,
-	"Input neuron potential when firing, " __CN_PU_POTENTIAL,
-	"Spike time, " __CN_PU_TIME,
-	"Spike time + refractory period, " __CN_PU_TIME,
-};
-const char* const cnrun::__CN_ParamSyms_OscillatorPoisson[] = {
-	"lambda",
-	"trel",
-	"trel+trfr",
-	"Vrst",
-	"Vfir",
-};
-const double cnrun::__CN_Params_OscillatorPoisson[] = {
-	0.02,	// firing rate Lambda [1/ms]=[10^3 Hz]
-	0.0,	// spike time
-	0.0,	// refractory period + spike time
-      -60.0,	// input neuron resting potential
-       20.0,	// input neuron potential when firing
-};
-
-const char* const cnrun::__CN_VarNames_OscillatorPoisson[] = {
-	"Membrane potential E, " __CN_PU_POTENTIAL,
-};
-const char* const cnrun::__CN_VarSyms_OscillatorPoisson[] = {
-	"E",
-};
-const double cnrun::__CN_Vars_OscillatorPoisson[] = {
-	-60.,
-};
-
-
-
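-// Three-state cycle: quiescent -> firing -> refractory.  A quiescent unit
-// starts firing with probability lambda*dt*exp(-lambda*dt) (the Poisson
-// probability of exactly one spike in dt); it stays firing for trel, then
-// remains refractory until trel+trfr after spike onset.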
-void
-cnrun::COscillatorPoisson::
-possibly_fire()
-{
-	if ( _status & CN_NFIRING )
-		if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trel_] ) {
-			(_status &= ~CN_NFIRING) |= CN_NREFRACT;
-			_spikelogger_agent->t_last_spike_end = model_time();
-		}
-	if ( _status & CN_NREFRACT )
-		if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trelrfr_] )
-			_status &= ~CN_NREFRACT;
-
-	if ( !(_status & (CN_NFIRING | CN_NREFRACT)) ) {
-		double lt = P[_lambda_] * M->dt();
-		if ( M->rng_sample() <= exp( -lt) * lt ) {
-			_status |= CN_NFIRING;
-			_spikelogger_agent->t_last_spike_start = model_time() /* + M->dt() */ ;
-		}
-	}
-
-//	E() = next_state_E;
-//	next_state_E = (_status & CN_NFIRING) ?P.n.Vfir :P.n.Vrst;
-	var_value(0) = (_status & CN_NFIRING) ?P[_Vfir_] :P[_Vrst_];
-//	if ( strcmp( label, "ORNa.1") == 0 ) cout << label << ": firing_started = " << t_firing_started << ", firing_ended = " << t_firing_ended << " E = " << E() << endl;
-}
-
-
-void
-cnrun::COscillatorPoisson::
-do_detect_spike_or_whatever()
-{
-	unsigned n = n_spikes_in_last_dt();
-	if ( n > 0 ) {
-		if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW) ) {
-			_spikelogger_agent->spike_history.push_back( model_time());
-			_spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
-		}
-	} else
-		if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
-			_spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
-			_spikelogger_agent->t_last_spike_end = model_time();
-		}
-}
-
-
-
-
-
-
-
-
-// Map neurons require discrete time
-
-const double cnrun::__CN_Params_NeuronMap[] = {
-	60.0,		// 0 - Vspike: spike amplitude factor
-	 3.0002440,	// 1 - alpha: "steepness / size" parameter
-	-2.4663490,	// 2 - gamma: "shift / excitation" parameter
-	 2.64,		// 3 - beta: input sensitivity
-	 0.,		// 4 - Idc: externally applied DC
-// Old comment by TN: beta chosen such that Isyn = 10 "nA" is the threshold for spiking
-};
-const char* const cnrun::__CN_ParamNames_NeuronMap[] = {
-	"Spike amplitude factor, " __CN_PU_POTENTIAL,
-	"\"Steepness / size\" parameter \316\261",
-	"\"Shift / excitation\" parameter \316\263",
-	"Input sensitivity \316\262, " __CN_PU_RESISTANCE,
-	"External DC, " __CN_PU_CURRENT,
-};
-const char* const cnrun::__CN_ParamSyms_NeuronMap[] = {
-	"Vspike",
-	"alpha",
-	"gamma",
-	"beta",
-	"Idc"
-};
-
-const double cnrun::__CN_Vars_NeuronMap[] = {
-      -50,	// E
-};
-const char* const cnrun::__CN_VarNames_NeuronMap[] = {
-	"Membrane potential E, " __CN_PU_POTENTIAL
-};
-const char* const cnrun::__CN_VarSyms_NeuronMap[] = {
-	"E",
-};
-
-
-
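-// One-step map update computed below (E = E() from the previous step):
-//	E <= 0:	E' = Vspike * ( alpha*Vspike / (Vspike - E - beta*(Isyn + Idc)) + gamma )
-//	0 < E <= Vspike*(alpha+gamma), and previous E <= 0:
-//		E' = Vspike*(alpha+gamma)
-//	otherwise:	E' = -Vspike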
-void
-cnrun::CNeuronMap::
-preadvance()
-{
-	double Vspxaxb = P[_Vspike_] * (P[_alpha_] + P[_gamma_]);
-	V_next[0] =
-		( E() <= 0. )
-		  ? P[_Vspike_] * ( P[_alpha_] * P[_Vspike_] / (P[_Vspike_] - E() - P[_beta_] * (Isyn() + P[_Idc_]))
-				   + P[_gamma_] )
-		  : ( E() <= Vspxaxb && _E_prev <= 0.)
-		    ? Vspxaxb
-		    : -P[_Vspike_];
-
-	_E_prev = E();
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-// ----- Pulse
-
-const char* const cnrun::__CN_ParamNames_NeuronDotPulse[] = {
-	"Frequency f, " __CN_PU_FREQUENCY,
-	"Resting potential Vrst, " __CN_PU_VOLTAGE,
-	"Firing potential Vfir, " __CN_PU_VOLTAGE,
-};
-const char* const cnrun::__CN_ParamSyms_NeuronDotPulse[] = {
-	"f",
-	"Vrst",
-	"Vfir",
-};
-const double cnrun::__CN_Params_NeuronDotPulse[] = {
-	 10,
-	-60,
-	 20
-};
-
-const char* const cnrun::__CN_VarNames_NeuronDotPulse[] = {
-	"Membrane potential E, " __CN_PU_VOLTAGE,
-	"Spikes recently fired",
-};
-const char* const cnrun::__CN_VarSyms_NeuronDotPulse[] = {
-	"E",
-	"nspk",
-};
-const double cnrun::__CN_Vars_NeuronDotPulse[] = {
-	-60.,	// E
-	 0
-};
-
-
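-// Deterministic pulse train: fires once whenever t*f/1000 (f in Hz, t in
-// msec) crosses an integer within the current dt.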
-void
-cnrun::CNeuronDotPulse::
-possibly_fire()
-{
-	enum TParametersNeuronDotPulse { _f_, _Vrst_, _Vfir_ };
-
-	spikes_fired_in_last_dt() = floor( (model_time() + M->dt()) * P[_f_]/1000)
-		                  - floor(  model_time()            * P[_f_]/1000);
-
-	if ( spikes_fired_in_last_dt() ) {
-		_status |=  CN_NFIRING;
-		var_value(0) = P[_Vfir_];
-	} else {
-		_status &= ~CN_NFIRING;
-		var_value(0) = P[_Vrst_];
-	}
-
-}
-
-void
-cnrun::CNeuronDotPulse::
-param_changed_hook()
-{
-	if ( P[_f_] < 0 ) {
-		if ( M->verbosely > 0 )
-			fprintf( stderr, "DotPulse oscillator \"%s\" got a negative parameter f: capping at 0\n", _label);
-		P[_f_] = 0.;
-	}
-}
-
-
-// eof
diff --git a/upstream/src/libcn/standalone-neurons.hh b/upstream/src/libcn/standalone-neurons.hh
deleted file mode 100644
index 0465dfb..0000000
--- a/upstream/src/libcn/standalone-neurons.hh
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- */
-
-
-
-#ifndef LIBCN_STANDALONE_NEURONS_H
-#define LIBCN_STANDALONE_NEURONS_H
-
-#include "base-neuron.hh"
-#include "standalone-attr.hh"
-#include "mx-attr.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-namespace cnrun {
-
-class C_StandaloneNeuron
-  : public C_BaseNeuron, public C_StandaloneAttributes {
-
-    private:
-	C_StandaloneNeuron();
-
-    protected:
-	C_StandaloneNeuron( TUnitType intype, const char *inlabel,
-			    double x, double y, double z,
-			    CModel*, int s_mask);
-
-    public:
-       ~C_StandaloneNeuron();
-
-	double &var_value( size_t v)			{  return V[v];  }
-	const double &get_var_value( size_t v) const 	{  return V[v];  }
-	void reset_vars()
-		{
-			memcpy( V.data(), __CNUDT[_type].stock_var_values,
-				sizeof(double) * v_no());
-			memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
-				sizeof(double) * v_no());
-		}
-};
-
-
-
-class C_StandaloneConductanceBasedNeuron
-  : public C_StandaloneNeuron {
-
-    private:
-	C_StandaloneConductanceBasedNeuron();
-
-    protected:
-	C_StandaloneConductanceBasedNeuron( TUnitType intype, const char *inlabel,
-					    double inx, double iny, double inz,
-					    CModel *inM, int s_mask)
-	      : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
-		{}
-
-    public:
-	double E() const			{  return V[0];  }
-	double E( vector<double>&) const	{  return V[0];  }
-
-	unsigned n_spikes_in_last_dt() const;
-};
-
-
-class C_StandaloneRateBasedNeuron
-  : public C_StandaloneNeuron {
-
-    private:
-	C_StandaloneRateBasedNeuron();
-
-    protected:
-	C_StandaloneRateBasedNeuron( TUnitType intype, const char *inlabel,
-				     double inx, double iny, double inz,
-				     CModel *inM, int s_mask)
-	      : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
-		{}
-
-    public:
-
-	unsigned n_spikes_in_last_dt() const;
-};
-
-
-
-
-
-
-
-
-class CNeuronHH_r
-  : public C_StandaloneRateBasedNeuron {
-    public:
-	CNeuronHH_r( const char *inlabel,
-		     double x, double y, double z,
-		     CModel *inM, int s_mask = 0)
-	      : C_StandaloneRateBasedNeuron( NT_HH_R, inlabel, x, y, z, inM, s_mask)
-		{}
-
-	enum {
-		_a_, _I0_, _r_, _Idc_,
-	};
-
-	double F( vector<double>&) const  __attribute__ ((hot));
-
-	void preadvance() __attribute__ ((hot));
-};
-
-
-
-
-
-
-
-
-class COscillatorPoisson
-  : public C_StandaloneConductanceBasedNeuron {
-
-    public:
-	COscillatorPoisson( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0)
-	      : C_StandaloneConductanceBasedNeuron( NT_POISSON, inlabel, x, y, z, inM, s_mask)
-		{
-		      // need _spikelogger_agent's fields even when no spikelogging is done
-			_spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
-								      0 | CN_KL_PERSIST | CN_KL_IDLE);
-		}
-
-	enum {
-		_lambda_, _trel_, _trelrfr_, _Vrst_, _Vfir_,
-	};
-
-	void possibly_fire() __attribute__ ((hot));
-
-	void do_detect_spike_or_whatever() __attribute__ ((hot));
-};
-
-
-
-
-
-
-
-
-
-
-class COscillatorDotPoisson
-  : public C_StandaloneConductanceBasedNeuron {
-
-    public:
-	COscillatorDotPoisson( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0)
-	      : C_StandaloneConductanceBasedNeuron( NT_DOTPOISSON, inlabel, x, y, z, inM, s_mask)
-		{
-		      // need _spikelogger_agent's fields even when no spikelogging is done
-			_spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
-								      0 | CN_KL_PERSIST | CN_KL_IDLE);
-		}
-
-	enum {
-		_lambda_, _Vrst_, _Vfir_,
-	};
-
-	void do_detect_spike_or_whatever() __attribute__ ((hot));
-
-	void possibly_fire() __attribute__ ((hot));
-
-	unsigned n_spikes_in_last_dt()
-		{  return V[1];  }
-
-	double &nspikes()
-		{  return V[1];  }
-};
-
-
-
-class CNeuronDotPulse
-  : public C_StandaloneConductanceBasedNeuron {
-    public:
-	CNeuronDotPulse( const char *inlabel,
-			 double x, double y, double z,
-			 CModel *inM, int s_mask = 0)
-	      : C_StandaloneConductanceBasedNeuron( NT_DOTPULSE, inlabel, x, y, z, inM, s_mask)
-		{}
-
-	enum { _f_, _Vrst_, _Vfir_ };
-
-	double &spikes_fired_in_last_dt()
-		{  return V[1];  }
-
-	void possibly_fire();
-
-	void param_changed_hook();
-};
-
-
-
-
-
-
-
-
-
-class CNeuronMap
-  : public C_StandaloneConductanceBasedNeuron {
-
-    public:
-	static constexpr double fixed_dt = 0.1;
-
-	CNeuronMap( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0);
-
-	enum {
-		_Vspike_, _alpha_, _gamma_, _beta_, _Idc_
-	};
-
-	void preadvance();
-	void fixate();
-    private:
-	double _E_prev;
-
-};
-
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/standalone-synapses.cc b/upstream/src/libcn/standalone-synapses.cc
deleted file mode 100644
index f29caf2..0000000
--- a/upstream/src/libcn/standalone-synapses.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2009-04-08
- *
- */
-
-
-#include <iostream>
-
-#include "param-unit-literals.hh"
-
-#include "standalone-synapses.hh"
-#include "types.hh"
-#include "model.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-
-cnrun::C_StandaloneSynapse::
-C_StandaloneSynapse( TUnitType intype,
-		     C_BaseNeuron* insource, C_BaseNeuron* intarget,
-		     double ing, CModel* inM, int s_mask)
-      : C_BaseSynapse (intype, insource, intarget, ing, inM, s_mask),
-	C_StandaloneAttributes (__CNUDT[intype].vno)
-{
-	reset_vars();
-	if ( M )
-		M->include_unit( this);
-	// else
-	// 	_status &= ~CN_UENABLED;
-}
-
-
-cnrun::C_StandaloneSynapse::
-~C_StandaloneSynapse()
-{
-	if ( __cn_verbosely > 5 )
-		fprintf( stderr, " deleting standalone synapse \"%s\"\n", _label);
-}
-
-// C_StandaloneSynapse::~C_StandaloneSynapse()
-// {
-// 	if ( M->unregister_standalone_synapse( this) )
-// 		cerr << "Synapse " << label << " was forgotten by mother model\n";
-// }
-
-
-
-
-
-
-
-
-const double cnrun::__CN_Params_SynapseMap[] = {
-//	0.075,
-       18.94463,  // Decay rate time constant
-	0.25,
-	0
-
-};
-const char* const cnrun::__CN_ParamNames_SynapseMap[] = {
-//	"Synaptic strength g, " __CN_PU_CONDUCTANCE,
-	"Decay rate time constant \317\204, " __CN_PU_RATE,
-	"Release quantile \316\264",
-	"Reversal potential Vrev, " __CN_PU_POTENTIAL
-};
-const char* const cnrun::__CN_ParamSyms_SynapseMap[] = {
-//	"gsyn",
-	"tau",
-	"delta",
-	"Vrev"
-};
-
-
-
-
-
-
-
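-// Multiplexing queue: update_queue() pushes one timestamp per spike emitted
-// by the source in the last dt and drops entries older than tau, so q() (the
-// queue length) counts the source's spikes within the last tau.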
-void
-cnrun::CSynapseMxMap::
-update_queue()
-{
-	unsigned k = _source -> n_spikes_in_last_dt();
-	while ( k-- )
-		_kq.push_back( model_time());
-
-	while ( true ) {
-		if ( q() > 0 && model_time() - _kq.front() > P[_tau_] )
-			_kq.erase( _kq.begin());
-		else
-			break;
-	}
-}
-
-
-// eof
diff --git a/upstream/src/libcn/standalone-synapses.hh b/upstream/src/libcn/standalone-synapses.hh
deleted file mode 100644
index 6da3d3d..0000000
--- a/upstream/src/libcn/standalone-synapses.hh
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- *
- */
-
-
-
-#ifndef LIBCN_STANDALONE_SYNAPSES_H
-#define LIBCN_STANDALONE_SYNAPSES_H
-
-#include <iostream>
-
-#include "base-synapse.hh"
-#include "base-neuron.hh"
-#include "standalone-attr.hh"
-#include "mx-attr.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-namespace cnrun {
-
-class CModel;
-
-class C_StandaloneSynapse
-  : public C_BaseSynapse, public C_StandaloneAttributes {
-
-    private:
-	C_StandaloneSynapse();
-    protected:
-	C_StandaloneSynapse( TUnitType intype, C_BaseNeuron *insource, C_BaseNeuron *intarget,
-			     double ing, CModel* inM, int s_mask = 0);
-
-    public:
-       ~C_StandaloneSynapse();
-
-	double &var_value( size_t v)			{ return V[v]; }
-	const double &get_var_value( size_t v) const	{ return V[v]; }
-	double  S() const				{ return V[0]; }
-	double &S( vector<double>&)			{ return V[0];  }
-
-	void reset_vars()
-		{
-			memcpy( V.data(), __CNUDT[_type].stock_var_values,
-				sizeof(double) * v_no());
-			memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
-				sizeof(double) * v_no());
-		}
-};
-
-
-
-
-
-class CSynapseMap
-  : public C_StandaloneSynapse {
-
-    private:
-	CSynapseMap();
-
-    public:
-	static constexpr double fixed_dt = 0.1;
-
-	CSynapseMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		     double ing, CModel *inM, int s_mask = 0, TUnitType alt_type = YT_MAP);
-
-	void preadvance();  // defined inline in model.h
-
-	enum {
-		_tau_, _delta_, _Esyn_
-	};
-	double Isyn( const C_BaseNeuron &with_neuron, double g) const
-		{
-			return -g * S() * (with_neuron.E() - P[_Esyn_]);
-		}
-	double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
-		{
-			return Isyn( with_neuron, g);
-		}
-
-    protected:
-	bool _source_was_spiking;
-};
-
-
-
-
-
-class CSynapseMxMap
-  : public CSynapseMap, public C_MultiplexingAttributes {
-
-    public:
-	static constexpr double fixed_dt = 0.1;
-
-	CSynapseMxMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
-		       double ing, CModel *inM, int s_mask = 0)
-	      : CSynapseMap( insource, intarget, ing, inM, s_mask, YT_MXMAP)
-		{}
-
-	enum {
-		_tau_, _delta_, _Esyn_
-	};
-	void preadvance();  // defined inline in model.h
-
-    private:
-	friend class CModel;
-	void update_queue();
-};
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libcn/types.cc b/upstream/src/libcn/types.cc
deleted file mode 100644
index e6fcfbc..0000000
--- a/upstream/src/libcn/types.cc
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny
- *
- * License: GPL-2+
- *
- * Initial version: 2008-09-23
- *
- * CN global unit descriptors
- */
-
-
-
-#include <cstdio>
-#include <cstring>
-#include <iostream>
-
-#include "libstilton/string.hh"
-
-#include "types.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-cnrun::SCNDescriptor cnrun::__CNUDT[] = {
-
-// ---------------- Neuron types
-
-	{ UT_HOSTED,  // NT_HH_D
-	  8+18, 4,
-	  __CN_Params_NeuronHH_d,
-	  __CN_ParamNames_NeuronHH_d,
-	  __CN_ParamSyms_NeuronHH_d,
-	  __CN_Vars_NeuronHH_d,
-	  __CN_VarNames_NeuronHH_d,
-	  __CN_VarSyms_NeuronHH_d,
-	  "HH",
-	  "HH",
-	  "Hodgkin-Huxley by Traub and Miles (1991)"
-	},
-
-	{ UT_RATEBASED,  // NT_HH_R
-	  4, 1,
-	  __CN_Params_NeuronHH_r,
-	  __CN_ParamNames_NeuronHH_r,
-	  __CN_ParamSyms_NeuronHH_r,
-	  __CN_Vars_NeuronHH_r,
-	  __CN_VarNames_NeuronHH_r,
-	  __CN_VarSyms_NeuronHH_r,
-	  "HHRate",
-	  "HHRate",
-	  "Rate-based model of the Hodgkin-Huxley neuron"
-	},
-
-	{ UT_HOSTED,  // NT_HH2_D
-	  11+18-1, 4,
-	  __CN_Params_NeuronHH2_d,
-	  __CN_ParamNames_NeuronHH2_d,
-	  __CN_ParamSyms_NeuronHH2_d,
-	  __CN_Vars_NeuronHH2_d,
-	  __CN_VarNames_NeuronHH_d,
-	  __CN_VarSyms_NeuronHH_d,
-	  "HH2",
-	  "HH2",
-	  "Hodgkin-Huxley by Traub & Miles w/ K leakage"
-	},
-
-	{ UT_RATEBASED | UT__STUB,  // NT_HH2_R
-	  0, 0,
-	  NULL,	  NULL,	  NULL,
-	  NULL,	  NULL,	  NULL,
-	  "HH2Rate",
-	  "HH2Rate",
-	  "Rate-based model of the Hodgkin-Huxley by Traub & Miles"
-	},
-
-//#ifdef CN_WANT_MORE_NEURONS
-	{ UT_HOSTED,  // NT_EC_D
-	  14, 6,
-	  __CN_Params_NeuronEC_d,
-	  __CN_ParamNames_NeuronEC_d,
-	  __CN_ParamSyms_NeuronEC_d,
-	  __CN_Vars_NeuronEC_d,
-	  __CN_VarNames_NeuronEC_d,
-	  __CN_VarSyms_NeuronEC_d,
-	  "EC",
-	  "EC",
-	  "Entorhinal Cortex neuron"
-	},
-
-	{ UT_HOSTED,  // NT_ECA_D
-	  11, 7,
-	  __CN_Params_NeuronECA_d,
-	  __CN_ParamNames_NeuronECA_d,
-	  __CN_ParamSyms_NeuronECA_d,
-	  __CN_Vars_NeuronECA_d,
-	  __CN_VarNames_NeuronECA_d,
-	  __CN_VarSyms_NeuronECA_d,
-	  "ECA",
-	  "ECA",
-	  "Entorhinal Cortex (A) neuron"
-	},
-//#endif
-
-	{ UT_OSCILLATOR | UT_DOT,  // NT_POISSONDOT
-	  3, 2,
-	  __CN_Params_OscillatorPoissonDot,
-	  __CN_ParamNames_OscillatorPoissonDot,
-	  __CN_ParamSyms_OscillatorPoissonDot,
-	  __CN_Vars_OscillatorPoissonDot,
-	  __CN_VarNames_OscillatorPoissonDot,
-	  __CN_VarSyms_OscillatorPoissonDot,
-	  "DotPoisson",
-	  "DotPoisson",
-	  "Duration-less spike Poisson oscillator"
-	},
-
-	{ UT_OSCILLATOR,  // NT_POISSON
-	  5, 1,
-	  __CN_Params_OscillatorPoisson,
-	  __CN_ParamNames_OscillatorPoisson,
-	  __CN_ParamSyms_OscillatorPoisson,
-	  __CN_Vars_OscillatorPoisson,
-	  __CN_VarNames_OscillatorPoisson,
-	  __CN_VarSyms_OscillatorPoisson,
-	  "Poisson",
-	  "Poisson",
-	  "Poisson oscillator"
-	},
-
-/*
-	{ UT_HOSTED | UT_OSCILLATOR,  // NT_LV
-	  1, 2,
-	  __CN_Params_OscillatorLV,
-	  __CN_ParamNames_OscillatorLV,
-	  __CN_ParamSyms_OscillatorLV,
-	  __CN_Vars_OscillatorLV,
-	  __CN_VarNames_OscillatorLV,
-	  __CN_VarSyms_OscillatorLV,
-	  "LV",
-	  "LV",
-	  "Lotka-Volterra oscillator"
-	},
-*/
-
-	{ UT_HOSTED | UT_OSCILLATOR,  // NT_COLPITTS,
-	  4, 3,
-	  __CN_Params_OscillatorColpitts,
-	  __CN_ParamNames_OscillatorColpitts,
-	  __CN_ParamSyms_OscillatorColpitts,
-	  __CN_Vars_OscillatorColpitts,
-	  __CN_VarNames_OscillatorColpitts,
-	  __CN_VarSyms_OscillatorColpitts,
-	  "Colpitts",
-	  "Colpitts",
-	  "Colpitts oscillator"
-	},
-
-	{ UT_HOSTED | UT_OSCILLATOR,  // NT_VDPOL,
-	  2, 2,
-	  __CN_Params_OscillatorVdPol,
-	  __CN_ParamNames_OscillatorVdPol,
-	  __CN_ParamSyms_OscillatorVdPol,
-	  __CN_Vars_OscillatorVdPol,
-	  __CN_VarNames_OscillatorVdPol,
-	  __CN_VarSyms_OscillatorVdPol,
-	  "VdPol",
-	  "VdPol",
-	  "Van der Pol oscillator"
-	},
-
-	{ UT_OSCILLATOR | UT_DOT,  // NT_DOTPULSE
-	  3, 2,
-	  __CN_Params_NeuronDotPulse,
-	  __CN_ParamNames_NeuronDotPulse,
-	  __CN_ParamSyms_NeuronDotPulse,
-	  __CN_Vars_NeuronDotPulse,
-	  __CN_VarNames_NeuronDotPulse,
-	  __CN_VarSyms_NeuronDotPulse,
-	  "DotPulse",
-	  "DotPulse",
-	  "Dot Pulse generator"
-	},
-
-	{ UT_DDTSET,  // NT_MAP
-	  5, 1,
-	  __CN_Params_NeuronMap,
-	  __CN_ParamNames_NeuronMap,
-	  __CN_ParamSyms_NeuronMap,
-	  __CN_Vars_NeuronMap,
-	  __CN_VarNames_NeuronMap,
-	  __CN_VarSyms_NeuronMap,
-	  "NMap",
-	  "NMap",
-	  "Map neuron"
-	},
-
-// ---------------- Synapse types
-
-// a proper synapse (of eg AB type) will be selected based on whether
-// its source/target is rate-based or discrete
-
-	{ UT_HOSTED,  // YT_AB_DD
-	  5, 1,
-	  __CN_Params_SynapseAB_dd,
-	  __CN_ParamNames_SynapseAB_dd,
-	  __CN_ParamSyms_SynapseAB_dd,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "AB",
-	  "AB_pp",
-	  "Alpha-Beta synapse (Destexhe, Mainen, Sejnowsky, 1994)"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_AB_DR
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  NULL,	  NULL,	  NULL,
-	  "AB",
-	  "AB_pt",
-	  "Alpha-Beta synapse (phasic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_AB_RD
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  NULL,	  NULL,	  NULL,
-	  "AB",
-	  "AB_tp",
-	  "Alpha-Beta synapse (tonic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_RATEBASED,  // YT_AB_RR
-	  4, 1,
-	  __CN_Params_SynapseAB_rr,
-	  __CN_ParamNames_SynapseAB_rr,
-	  __CN_ParamSyms_SynapseAB_rr,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "AB",
-	  "AB_tt",
-	  "Alpha-Beta synapse (tonic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_MULTIPLEXING,  // YT_MXAB_DD, inheriting all parameters except alpha, and variables from YT_AB
-	  5, 1,
-	  __CN_Params_SynapseMxAB_dd,
-	  __CN_ParamNames_SynapseAB_dd,
-	  __CN_ParamSyms_SynapseAB_dd,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "AB",
-	  "AB_Mx_pp",
-	  "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING,  // YT_MXAB_DR
-	  5, 1,
-	  __CN_Params_SynapseMxAB_dr,
-	  __CN_ParamNames_SynapseAB_dr,
-	  __CN_ParamSyms_SynapseAB_dr,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "AB",
-	  "AB_Mx_pt",
-	  "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->tonic)"
-	},
-
-
-	{ UT_HOSTED,  // YT_ABMINS_DD
-	  5, 1,
-	  __CN_Params_SynapseABMinus_dd,
-	  __CN_ParamNames_SynapseAB_dd,
-	  __CN_ParamSyms_SynapseAB_dd,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_pp",
-	  "Alpha-Beta synapse w/out (1-S) term"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_ABMINS_DR
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_pt",
-	  "Alpha-Beta synapse w/out (1-S) term (phasic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_ABMINS_RD
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_tp",
-	  "Alpha-Beta synapse w/out (1-S) term (tonic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB,  // YT_ABMINS_RR
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_tt",
-	  "Alpha-Beta synapse w/out (1-S) term (tonic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_MULTIPLEXING | UT__STUB,  // YT_MXABMINUS_DD
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_Mx_pp",
-	  "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB,  // YT_MXABMINUS_DR
-	  5, 1,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "ABMinus",
-	  "ABMinus_Mx_pt",
-	  "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->tonic)"
-	},
-
-
-	{ UT_HOSTED,  // YT_RALL_DD
-	  3, 2,
-	  __CN_Params_SynapseRall_dd,
-	  __CN_ParamNames_SynapseRall_dd,
-	  __CN_ParamSyms_SynapseRall_dd,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_pp",
-	  "Rall synapse (Rall, 1967)"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_RALL_DR
-	  3, 2,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_pt",
-	  "Rall synapse (Rall, 1967) (phasic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_RALL_RD
-	  3, 2,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_tp",
-	  "Rall synapse (tonic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB,  // YT_RALL_RR
-	  3, 2,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_tt",
-	  "Rall synapse (tonic->tonic)"
-	},
-
-	{ UT_HOSTED | UT_MULTIPLEXING | UT__STUB,  // YT_MXRALL_DD
-	  3, 2,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_Mx_pp",
-	  "Rall synapse for use with durationless units as source (phasic->phasic)"
-	},
-
-	{ UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB,  // YT_MXRALL_DR
-	  3, 2,
-	  NULL,	  NULL,	  NULL,
-	  __CN_Vars_SynapseRall,
-	  __CN_VarNames_SynapseRall,
-	  __CN_VarSyms_SynapseRall,
-	  "Rall",
-	  "Rall_Mx_pt",
-	  "Rall synapse for use with durationless units as source (phasic->tonic)"
-	},
-
-
-	{ UT_DDTSET,  // YT_MAP
-	  3, 1,
-	  __CN_Params_SynapseMap,
-	  __CN_ParamNames_SynapseMap,
-	  __CN_ParamSyms_SynapseMap,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "Map",
-	  "Map",
-	  "Map synapse"
-	},
-
-	{ UT_DDTSET | UT_MULTIPLEXING,  // YT_MXMAP
-	  3, 1,
-	  __CN_Params_SynapseMap,
-	  __CN_ParamNames_SynapseMap,
-	  __CN_ParamSyms_SynapseMap,
-	  __CN_Vars_SynapseAB,
-	  __CN_VarNames_SynapseAB,
-	  __CN_VarSyms_SynapseAB,
-	  "Map",
-	  "Map_Mx",
-	  "Multiplexing Map synapse"
-	},
-};
-
-
-
-cnrun::TUnitType
-cnrun::
-unit_family_by_string( const char *id)
-{
-	for ( int i = NT_FIRST; i <= YT_LAST; i++ )
-		if ( strcmp( id, __CNUDT[i].family ) == 0 )
-			return (TUnitType)i;
-	return NT_VOID;
-}
-
-cnrun::TUnitType
-cnrun::
-unit_species_by_string( const char *id)
-{
-	for ( int i = NT_FIRST; i <= YT_LAST; i++ )
-		if ( strcmp( id, __CNUDT[i].species ) == 0 )
-			return (TUnitType)i;
-	return NT_VOID;
-}
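-
-// e.g., unit_species_by_string( "AB_pp") == YT_AB_DD, while
-// unit_family_by_string( "AB") returns the first entry of the AB family;
-// unknown ids yield NT_VOID.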
-
-
-
-
-void
-cnrun::
-cnmodel_dump_available_units()
-{
-	size_t u, p;
-	cout << "\n===== Neurons:\n";
-	for ( u = NT_FIRST; u <= NT_LAST; u++ ) {
-		SCNDescriptor &U = __CNUDT[u];
-		if ( U.traits & UT__STUB )
-			continue;
-		printf( "--- [%s]: %s\nParameters:\n",
-			U.species, U.description);
-		for ( p = 0; p < U.pno; p++ ) {
-			printf( "%4zu: %-5s\t= %s  %s\n",
-				p, U.stock_param_syms[p],
-				cnrun::str::double_dot_aligned_s( U.stock_param_values[p], 4, 8).c_str(),
-				U.stock_param_names[p]);
-		}
-		printf( "Variables:\n");
-		for ( p = 0; p < U.vno; p++ ) {
-			printf( "%4zu: %-5s\t= %s  %s\n",
-				p, U.stock_var_syms[p],
-				cnrun::str::double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
-				U.stock_var_names[p]);
-		}
-		cout << endl;
-	}
-	cout << "\n===== Synapses:\n";
-	for ( u = YT_FIRST; u <= YT_LAST; u++ ) {
-		SCNDescriptor &U = __CNUDT[u];
-		if ( U.traits & UT__STUB )
-			continue;
-		printf( "--- [%s]: %s\nParameters:\n",
-			U.species, U.description);
-		for ( p = 0; p < U.pno; p++ ) {
-			printf( "%4zu: %-5s\t= %s  %s\n",
-				p, U.stock_param_syms[p],
-				cnrun::str::double_dot_aligned_s( U.stock_param_values[p], 4, 8).c_str(),
-				U.stock_param_names[p]);
-		}
-		cout << "Variables:\n";
-		for ( p = 0; p < U.vno; p++ ) {
-			printf( "%4zu: %-5s\t= %s  %s\n",
-				p, U.stock_var_syms[p],
-				cnrun::str::double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
-				U.stock_var_names[p]);
-		}
-		cout << endl;
-	}
-	cout << endl;
-}
-
-
-// eof
diff --git a/upstream/src/libcn/types.hh b/upstream/src/libcn/types.hh
deleted file mode 100644
index dcbab3e..0000000
--- a/upstream/src/libcn/types.hh
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *         building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-08-02
- *
- * Enumerated type for unit ids, and a structure describing a unit type
- */
-
-
-//#define CN_WANT_MORE_NEURONS
-
-#ifndef LIBCN_TYPES_H
-#define LIBCN_TYPES_H
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-namespace cnrun {
-
-enum TUnitType {
-      // neuron types
-	NT_VOID = -1,
-
-	NT_HH_D,
-	NT_HH_R,
-	NT_HH2_D,
-	NT_HH2_R,
-//#ifdef CN_WANT_MORE_NEURONS
-	NT_EC_D,
-	NT_ECA_D,
-//#endif
-	NT_DOTPOISSON,
-	NT_POISSON,
-//#ifdef CN_WANT_MORE_NEURONS
-//	NT_LV,
-	NT_COLPITTS,
-	NT_VDPOL,
-//#endif
-	NT_DOTPULSE,
-	NT_MAP,
-
-      // synapse types
-	YT_AB_DD,
-	YT_AB_DR,
-	YT_AB_RD,
-	YT_AB_RR,
-	YT_MXAB_DD,
-	YT_MXAB_DR,
-
-	YT_ABMINUS_DD,
-	YT_ABMINUS_DR,
-	YT_ABMINUS_RD,
-	YT_ABMINUS_RR,
-	YT_MXABMINUS_DD,
-	YT_MXABMINUS_DR,
-
-	YT_RALL_DD,
-	YT_RALL_DR,
-	YT_RALL_RD,
-	YT_RALL_RR,
-	YT_MXRALL_DD,
-	YT_MXRALL_DR,
-
-	YT_MAP,
-	YT_MXMAP,
-};
-
-#define NT_FIRST NT_HH_D
-#define NT_LAST  NT_MAP
-#define YT_FIRST YT_AB_DD
-#define YT_LAST  YT_MXMAP
-
-
-
-// traits, used to ensure units being connected are compatible
-#define UT_HOSTED	(1 << 0)
-#define UT_DDTSET	(1 << 1)
-#define UT_OSCILLATOR	(1 << 2)
-#define UT_RATEBASED	(1 << 3)
-#define UT_SRCISRATE	UT_RATEBASED
-#define UT_TGTISRATE	(1 << 4)
-#define UT_DOT		(1 << 5)
-#define UT_MULTIPLEXING	 UT_DOT
-#define UT__STUB	(1 << 15)
-
-struct SCNDescriptor {
-	int	traits;
-	unsigned short
-		pno, vno;
-	const double	*const  stock_param_values;
-	const char	*const *stock_param_names;
-	const char	*const *stock_param_syms;
-	const double	*const  stock_var_values;
-	const char	*const *stock_var_names;
-	const char	*const *stock_var_syms;
-	const char	*family, *species;
-	const char	*description;
-};
-
-TUnitType unit_family_by_string( const char*) __attribute__ ((pure));
-TUnitType unit_species_by_string( const char*) __attribute__ ((pure));
-
-inline bool
-unit_species_is_valid( const char *id)
-{
-	return unit_species_by_string(id) != NT_VOID;
-}
-inline bool
-unit_species_is_neuron( const char *id)
-{
-	TUnitType t = unit_species_by_string(id);
-	return t >= NT_FIRST && t <= NT_LAST;
-}
-
-inline bool
-unit_species_is_synapse( const char *id)
-{
-	TUnitType t = unit_species_by_string(id);
-	return t >= YT_FIRST && t <= YT_LAST;
-}
-
-inline bool
-unit_family_is_neuron( const char *id)
-{
-	TUnitType t = unit_family_by_string(id);
-	return t >= NT_FIRST && t <= NT_LAST;
-}
-
-inline bool
-unit_family_is_synapse( const char *id)
-{
-	TUnitType t = unit_family_by_string(id);
-	return t >= YT_FIRST && t <= YT_LAST;
-}
-
-extern SCNDescriptor __CNUDT[];
-
-void cnmodel_dump_available_units();
-
-
-
-extern const double __CN_Params_NeuronHH_d[];
-extern const char* const __CN_ParamNames_NeuronHH_d[];
-extern const char* const __CN_ParamSyms_NeuronHH_d[];
-extern const double __CN_Vars_NeuronHH_d[];
-extern const char* const __CN_VarNames_NeuronHH_d[];
-extern const char* const __CN_VarSyms_NeuronHH_d[];
-
-extern const double __CN_Params_NeuronHH2_d[];
-extern const char* const __CN_ParamNames_NeuronHH2_d[];
-extern const char* const __CN_ParamSyms_NeuronHH2_d[];
-extern const double __CN_Vars_NeuronHH2_d[];
-
-
-extern const double __CN_Params_NeuronHH_r[];
-extern const char* const __CN_ParamNames_NeuronHH_r[];
-extern const char* const __CN_ParamSyms_NeuronHH_r[];
-extern const double __CN_Vars_NeuronHH_r[];
-extern const char* const __CN_VarNames_NeuronHH_r[];
-extern const char* const __CN_VarSyms_NeuronHH_r[];
-
-
-extern const double __CN_Params_NeuronDotPulse[];
-extern const char* const __CN_ParamNames_NeuronDotPulse[];
-extern const char* const __CN_ParamSyms_NeuronDotPulse[];
-extern const double __CN_Vars_NeuronDotPulse[];
-extern const char* const __CN_VarNames_NeuronDotPulse[];
-extern const char* const __CN_VarSyms_NeuronDotPulse[];
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-extern const double __CN_Params_NeuronEC_d[];
-extern const char* const __CN_ParamNames_NeuronEC_d[];
-extern const char* const __CN_ParamSyms_NeuronEC_d[];
-extern const double __CN_Vars_NeuronEC_d[];
-extern const char* const __CN_VarNames_NeuronEC_d[];
-extern const char* const __CN_VarSyms_NeuronEC_d[];
-
-
-extern const double __CN_Params_NeuronECA_d[];
-extern const char* const __CN_ParamNames_NeuronECA_d[];
-extern const char* const __CN_ParamSyms_NeuronECA_d[];
-extern const double __CN_Vars_NeuronECA_d[];
-extern const char* const __CN_VarNames_NeuronECA_d[];
-extern const char* const __CN_VarSyms_NeuronECA_d[];
-//#endif
-
-extern const double __CN_Params_NeuronMap[];
-extern const char* const __CN_ParamNames_NeuronMap[];
-extern const char* const __CN_ParamSyms_NeuronMap[];
-extern const double __CN_Vars_NeuronMap[];
-extern const char* const __CN_VarNames_NeuronMap[];
-extern const char* const __CN_VarSyms_NeuronMap[];
-
-
-extern const double __CN_Params_OscillatorPoissonDot[];
-extern const char* const __CN_ParamNames_OscillatorPoissonDot[];
-extern const char* const __CN_ParamSyms_OscillatorPoissonDot[];
-extern const double __CN_Vars_OscillatorPoissonDot[];
-extern const char* const __CN_VarNames_OscillatorPoissonDot[];
-extern const char* const __CN_VarSyms_OscillatorPoissonDot[];
-
-extern const double __CN_Params_OscillatorPoisson[];
-extern const char* const __CN_ParamNames_OscillatorPoisson[];
-extern const char* const __CN_ParamSyms_OscillatorPoisson[];
-extern const double __CN_Vars_OscillatorPoisson[];
-extern const char* const __CN_VarNames_OscillatorPoisson[];
-extern const char* const __CN_VarSyms_OscillatorPoisson[];
-
-
-/*
-extern const double __CN_Params_OscillatorLV[];
-extern const char* const __CN_ParamNames_OscillatorLV[];
-extern const char* const __CN_ParamSyms_OscillatorLV[];
-extern const double __CN_Vars_OscillatorLV[];
-extern const char* const __CN_VarNames_OscillatorLV[];
-extern const char* const __CN_VarSyms_OscillatorLV[];
-*/
-
-extern const double __CN_Params_OscillatorColpitts[];
-extern const char* const __CN_ParamNames_OscillatorColpitts[];
-extern const char* const __CN_ParamSyms_OscillatorColpitts[];
-extern const double __CN_Vars_OscillatorColpitts[];
-extern const char* const __CN_VarNames_OscillatorColpitts[];
-extern const char* const __CN_VarSyms_OscillatorColpitts[];
-
-
-extern const double __CN_Params_OscillatorVdPol[];
-extern const char* const __CN_ParamNames_OscillatorVdPol[];
-extern const char* const __CN_ParamSyms_OscillatorVdPol[];
-extern const double __CN_Vars_OscillatorVdPol[];
-extern const char* const __CN_VarNames_OscillatorVdPol[];
-extern const char* const __CN_VarSyms_OscillatorVdPol[];
-//#endif
-
-
-
-extern const double __CN_Params_SynapseAB_dd[];
-extern const char* const __CN_ParamNames_SynapseAB_dd[];
-extern const char* const __CN_ParamSyms_SynapseAB_dd[];
-extern const double __CN_Vars_SynapseAB[];
-extern const char* const __CN_VarNames_SynapseAB[];
-extern const char* const __CN_VarSyms_SynapseAB[];
-
-extern const double __CN_Params_SynapseABMinus_dd[];
-
-extern const double __CN_Params_SynapseMxAB_dd[];
-
-extern const char* const __CN_ParamNames_SynapseAB_dr[];
-extern const char* const __CN_ParamSyms_SynapseAB_dr[];
-
-extern const double __CN_Params_SynapseMxAB_dr[];
-
-extern const double __CN_Params_SynapseAB_rr[];
-extern const char* const __CN_ParamNames_SynapseAB_rr[];
-extern const char* const __CN_ParamSyms_SynapseAB_rr[];
-
-
-extern const double __CN_Params_SynapseRall_dd[];
-extern const char* const __CN_ParamNames_SynapseRall_dd[];
-extern const char* const __CN_ParamSyms_SynapseRall_dd[];
-extern const double __CN_Vars_SynapseRall[];
-extern const char* const __CN_VarNames_SynapseRall[];
-extern const char* const __CN_VarSyms_SynapseRall[];
-
-
-extern const double __CN_Params_SynapseMap[];
-extern const char* const __CN_ParamNames_SynapseMap[];
-extern const char* const __CN_ParamSyms_SynapseMap[];
-
-}
-#endif
-
-// EOF
diff --git a/upstream/src/libcnrun/Makefile.am b/upstream/src/libcnrun/Makefile.am
new file mode 100644
index 0000000..d4a01bb
--- /dev/null
+++ b/upstream/src/libcnrun/Makefile.am
@@ -0,0 +1,46 @@
+include $(top_srcdir)/src/Common.mk
+AM_CXXFLAGS += -shared -fPIC
+
+lib_LTLIBRARIES = \
+	libcnrun.la
+
+libcnrun_la_SOURCES = \
+	forward-decls.hh \
+	sources.cc \
+	types.cc \
+	base-unit.cc \
+	standalone-neurons.cc \
+	standalone-synapses.cc \
+	hosted-neurons.cc \
+	hosted-synapses.cc \
+	model-struct.cc \
+	model-tags.cc \
+	model-cycle.cc \
+	model-nmlio.cc \
+	sources.hh \
+	types.hh \
+	mx-attr.hh \
+	base-unit.hh	standalone-attr.hh    	hosted-attr.hh \
+	base-synapse.hh	standalone-neurons.hh 	hosted-neurons.hh  \
+	base-neuron.hh	standalone-synapses.hh	hosted-synapses.hh \
+	model.hh \
+	integrate-base.hh integrate-rk65.hh
+
+libcnrun_la_LIBADD = \
+	../libstilton/liba.a
+
+libcnrun_la_LDFLAGS = \
+	-shared -version-info $(subst .,:,$(PACKAGE_VERSION))
+
+libcnrunincdir = $(includedir)/libcnrun
+
+libcnruninc_HEADERS = \
+	forward-decls.hh \
+	sources.hh \
+	types.hh \
+	mx-attr.hh \
+	base-unit.hh	standalone-attr.hh    	hosted-attr.hh \
+	base-synapse.hh	standalone-neurons.hh 	hosted-neurons.hh  \
+	base-neuron.hh	standalone-synapses.hh	hosted-synapses.hh \
+	model.hh \
+	integrate-base.hh integrate-rk65.hh
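(The -version-info value is derived from the package version by GNU make's subst function: with PACKAGE_VERSION = 2.0.0, $(subst .,:,$(PACKAGE_VERSION)) expands to 2:0:0, so the library is linked as -version-info 2:0:0.)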
diff --git a/upstream/src/libcnrun/base-neuron.hh b/upstream/src/libcnrun/base-neuron.hh
new file mode 100644
index 0000000..e1ff426
--- /dev/null
+++ b/upstream/src/libcnrun/base-neuron.hh
@@ -0,0 +1,298 @@
+/*
+ *       File name:  libcn/base-neuron.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-03-31
+ *
+ *         Purpose:  neuron base class
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_BASENEURON_H_
+#define CNRUN_LIBCN_BASENEURON_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <list>
+#include <cstring>
+#include <cmath>
+#include <map>
+#include <tuple>
+
+#include "forward-decls.hh"
+#include "base-unit.hh"
+#include "base-synapse.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+struct SSpikeloggerService;
+
+using SCleft = map<C_BaseSynapse*, double>;
+inline double operator+ ( double a, const pair<C_BaseSynapse*, double>& b) { return a + b.second; }
+
+class C_BaseNeuron
+  : public C_BaseUnit {
+
+        DELETE_DEFAULT_METHODS (C_BaseNeuron)
+
+        friend class CModel;
+        friend class C_BaseSynapse;
+
+    protected:
+        C_BaseNeuron (TUnitType intype, const string& inlabel,
+                      double inx, double iny, double inz,
+                      CModel* inM, int s_mask = 0)
+              : C_BaseUnit (intype, inlabel, inM, s_mask),
+                pos (inx, iny, inz),
+                _spikelogger_agent (nullptr)
+                {}
+
+        virtual ~C_BaseNeuron();
+
+        struct SCoord {
+
+                DELETE_DEFAULT_METHODS (SCoord)
+
+                double x, y, z;
+
+                SCoord( double inx, double iny, double inz)
+                      : x (inx), y (iny), z (inz)
+                        {}
+
+                SCoord& operator= ( tuple<double, double, double> v)
+                        {
+                                tie(x, y, z) = v;
+                                return *this;
+                        }
+
+              // distance
+                double operator- ( const SCoord &p) const
+                        {
+                                return sqrt( pow(x - p.x, 2) + pow(y - p.y, 2) + pow(z - p.z, 2));
+                        }
+                bool too_close( const SCoord& p, double mindist = .42 /* units? */) const
+                        {
+                                return operator-(p) < mindist;
+                        }
+        };
+
+    public:
+        SCoord  pos;
+
+        size_t axonal_conns() const     { return _axonal_harbour.size(); }
+        size_t dendrites() const        { return _dendrites.size(); }
+
+        bool
+        connects_to( const C_BaseNeuron &to) const;
+
+        C_BaseSynapse*
+        connects_via( const C_BaseNeuron &to,
+                      SCleft::mapped_type *g_ptr = nullptr) const;
+
+        void reset_state();
+
+      // even though for rate-based neurons E is not meaningful,
+      // leave these here to make the method available to synapses wanting _target->E
+        virtual double E() const
+                {  return 0;  }
+        virtual double E( vector<double>&) const
+                {  return 0;  }
+      // likewise, for those needing _source->F
+        virtual double F() const
+                {  return 0;  }
+        virtual double F( vector<double>&) const
+                {  return 0;  }
+
+        // struct __SCleft_second_plus {
+        //         double operator() ( double a, const SCleft::value_type &i) { return a + i.second; }
+        // };
+        double Isyn() const  // is the sum of Isyn() on all dendrites
+                {
+                        double I = 0.;
+                        for ( auto &Y : _dendrites )
+                                I += Y.first->Isyn(*this, Y.second);
+                        return I;
+                }
+
+        double Isyn( vector<double> &x) const  // same, evaluated on the supplied state vector x
+                {
+                        double I = 0.;
+                        for ( auto &Y : _dendrites )
+                                I += Y.first->Isyn(x, *this, Y.second);
+                        return I;
+                }
+
+        virtual void possibly_fire()
+                {}
+
+      // Even though rate-based neurons do not track individual spikes,
+      // the probability of such a neuron having spiked in the last dt can be
+      // estimated as F*dt*rand(), which keeps this method meaningful for them.
+
+      // Note this assumes P[0] is F for all rate-based neurons, and E
+      // for conductance-based ones, which by now is hard-coded for all neurons.
+        virtual size_t n_spikes_in_last_dt() const
+                {  return 0;  }
+        virtual void do_detect_spike_or_whatever()
+                {}
+
+        SSpikeloggerService* spikelogger_agent()  { return _spikelogger_agent;  }
+        SSpikeloggerService*
+        enable_spikelogging_service( int s_mask = 0);
+        SSpikeloggerService*
+        enable_spikelogging_service( double sample_period, double sigma, double from = 0.,
+                                     int s_mask = 0);
+        void disable_spikelogging_service();
+        void sync_spikelogging_history() const;
+
+        double distance_to( C_BaseNeuron*) const; // to be implemented on demand
+
+        void dump( bool with_params = false, FILE *strm = stdout) const;
+
+    protected:
+        SCleft  _dendrites;
+        list<C_BaseSynapse*>
+                _axonal_harbour;
+
+        SSpikeloggerService
+               *_spikelogger_agent;
+};
+
+
+
+
+
+#define CN_KL_COMPUTESDF        (1 << 0)
+#define CN_KL_ISSPIKINGNOW      (1 << 1)
+#define CN_KL_PERSIST           (1 << 2)  // should not be deleted at disable_spikelogging_service
+#define CN_KL_IDLE              (1 << 3)  // should not be placed on spikelogging_neurons on enable_spikelogging_service
+
+
+struct SSpikeloggerService {
+
+        DELETE_DEFAULT_METHODS (SSpikeloggerService)
+
+        friend class C_BaseNeuron;
+        friend class C_HostedConductanceBasedNeuron;  // accesses _status from do_spikelogging_or_whatever
+        friend class COscillatorDotPoisson;  // same
+        friend class COscillatorPoisson;  // same
+        friend class CModel;  // checks CN_KL_IDLE in include_unit
+
+    public:
+        SSpikeloggerService (C_BaseNeuron *client,
+                             int s_mask = 0)
+              : _client (client),
+                t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
+                sample_period (42), sigma (42), start_delay (0.),
+                _status (s_mask & ~CN_KL_COMPUTESDF)
+                {}
+        SSpikeloggerService (C_BaseNeuron *client,
+                             double insample_period, double insigma, double instart_delay = 0.,
+                             int s_mask = 0)
+              : _client (client),
+                t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
+                sample_period (insample_period), sigma (insigma), start_delay (instart_delay),
+                _status (s_mask | CN_KL_COMPUTESDF)
+                {}
+
+        C_BaseNeuron *_client;
+
+        double  t_last_spike_start,
+                t_last_spike_end;
+
+        double  sample_period,
+                sigma,
+                start_delay;
+
+//        void spike_detect();  // multiplexing units will have a different version
+        // replaced by do_spikelogging_or_whatever on the client side
+
+        vector<double> spike_history;
+
+        void reset()
+                {
+                        _status &= ~CN_KL_ISSPIKINGNOW;
+                        t_last_spike_start = t_last_spike_end
+                                /*= t_firing_started = t_firing_ended */ = -INFINITY;
+                        spike_history.clear();
+                }
+
+        size_t n_spikes_since( double since = 0.) const;
+
+      // spike density function
+        double sdf( double at, double sample_length, double sigma,
+                    size_t* nspikes = nullptr) const;
+      // spike homogeneity function
+        double shf( double at, double sample_length) const;
+
+      // the same, with caller-specified sampling
+        size_t get_sxf_vector_custom( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
+                               double sample_period_custom, double sigma_custom,
+                               double from = 0., double to = 0.) const; // "to == 0." for model_time()
+        size_t get_sxf_vector( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
+                               double from = 0., double to = 0.) const
+                {
+                        return get_sxf_vector_custom( sdf_buf, shf_buf, nsp_buf,
+                                                      sample_period, sigma,
+                                                      from, to);
+                }
+
+    protected:
+        void sync_history() const;
+
+    private:
+        int _status;
+};
+
+
+
+
+inline void
+C_BaseNeuron::reset_state()
+{
+        C_BaseUnit::reset_state();
+        if ( _spikelogger_agent )
+                _spikelogger_agent->reset();
+}
+
+
+
+inline void
+C_BaseNeuron::sync_spikelogging_history() const
+{
+        if ( _spikelogger_agent )
+                _spikelogger_agent->sync_history();
+}
+
+
+
+inline double
+C_BaseSynapse::g_on_target( C_BaseNeuron &neuron) const
+{
+        return neuron._dendrites.at(
+                const_cast<C_BaseSynapse*>(this));
+}
+inline void
+C_BaseSynapse::set_g_on_target( C_BaseNeuron &neuron, double g)
+{
+        neuron._dendrites[this] = g;
+}
+
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
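As a standalone illustration of the sdf() contract declared above: the spike
density function is a Gaussian-kernel sum over spike times falling within
+/- sample_width/2 of the probe time. A minimal sketch with hypothetical
names (the real implementation in base-unit.cc additionally breaks out of
the loop early, since spike_history is kept sorted):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Sum exp(-dt^2/sigma^2) over spikes within the sampling window.
    static double sdf_sketch( const std::vector<double>& spikes,
                              double at, double sample_width, double sigma)
    {
            double result = 0.;
            for ( double t : spikes ) {
                    double dt = t - at;
                    if ( dt < -sample_width/2. || dt > sample_width/2. )
                            continue;
                    result += std::exp( -dt*dt / (sigma * sigma));
            }
            return result;
    }

    int main()
    {
            std::vector<double> spikes {9.5, 10.0, 10.4, 30.0};
            std::printf( "sdf(10) = %g\n", sdf_sketch( spikes, 10., 5., 1.));
            return 0;
    }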
diff --git a/upstream/src/libcnrun/base-synapse.hh b/upstream/src/libcnrun/base-synapse.hh
new file mode 100644
index 0000000..864148e
--- /dev/null
+++ b/upstream/src/libcnrun/base-synapse.hh
@@ -0,0 +1,97 @@
+/*
+ *       File name:  libcn/base-synapse.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-03-31
+ *
+ *         Purpose:  synapse base class
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_BASESYNAPSE_H_
+#define CNRUN_LIBCN_BASESYNAPSE_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <cmath>
+#include <vector>
+#include <list>
+#include <map>
+
+#include "libstilton/lang.hh"
+#include "libstilton/containers.hh"
+#include "forward-decls.hh"
+#include "base-unit.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+class C_BaseSynapse
+  : public C_BaseUnit {
+
+        DELETE_DEFAULT_METHODS (C_BaseSynapse)
+
+        friend class CModel;
+        friend class C_BaseNeuron;
+
+    protected:
+        C_BaseSynapse( TUnitType intype,
+                       C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                       double ing, CModel *inM, int s_mask = 0);
+        virtual ~C_BaseSynapse();
+
+    public:
+        bool has_target( const C_BaseNeuron& tgt) const __attribute__ ((pure))
+                {
+                        return cnrun::alg::member(
+                                const_cast<C_BaseNeuron*>(&tgt), _targets);
+                }
+        C_BaseNeuron* source()  {  return _source;  }
+
+        double g_on_target( C_BaseNeuron&) const;
+        void set_g_on_target( C_BaseNeuron&, double);
+
+        C_BaseSynapse *clone_to_target( C_BaseNeuron *nt, double g);
+        C_BaseSynapse *make_clone_independent( C_BaseNeuron *target);
+
+        void reset_state()
+                {
+                        C_BaseUnit::reset_state();
+                        t_last_release_started = -INFINITY;
+                }
+
+        virtual double Isyn( const C_BaseNeuron &with_neuron, double g) const = 0;
+        virtual double Isyn( vector<double> &base, const C_BaseNeuron &with_neuron, double g) const = 0;
+        // no gsyn known to the synapse: now C_BaseNeuron::SCleft knows it
+
+        void dump( bool with_params = false, FILE *strm = stdout) const;
+
+    protected:
+        C_BaseNeuron
+               *_source;
+        list<C_BaseNeuron*>
+                _targets;
+
+        double t_last_release_started;
+
+    private:
+        virtual void update_queue()
+                {}
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
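The conductance a synapse exerts on a particular target is not stored in the
synapse itself: each postsynaptic neuron keeps it in its SCleft map (synapse
pointer to gsyn), which g_on_target()/set_g_on_target() consult. A minimal
sketch of that bookkeeping, with stand-in types in place of the real classes:

    #include <cstdio>
    #include <map>

    struct Synapse {};  // stand-in for C_BaseSynapse

    struct Neuron {     // stand-in for C_BaseNeuron and its SCleft
            std::map<Synapse*, double> dendrites;  // synapse -> gsyn
    };

    int main()
    {
            Neuron n;
            Synapse y;
            n.dendrites[&y] = 0.03;                         // cf. set_g_on_target()
            std::printf( "g = %g\n", n.dendrites.at( &y));  // cf. g_on_target()
            return 0;
    }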
diff --git a/upstream/src/libcnrun/base-unit.cc b/upstream/src/libcnrun/base-unit.cc
new file mode 100644
index 0000000..9984dfa
--- /dev/null
+++ b/upstream/src/libcnrun/base-unit.cc
@@ -0,0 +1,666 @@
+/*
+ *       File name:  libcn/base-unit.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  unit base class
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <iostream>
+#include <limits>
+#include <functional>
+
+#include <gsl/gsl_statistics_double.h>
+
+#include "libstilton/containers.hh"
+#include "base-unit.hh"
+#include "model.hh"
+
+
+using namespace std;
+using namespace cnrun;
+using cnrun::alg::member;
+
+unsigned short cnrun::global::precision = 4;
+int cnrun::global::verbosely = 1;
+
+cnrun::C_BaseUnit::
+C_BaseUnit (TUnitType type_, const string& label_,
+            CModel* M_, int s_mask)
+      : precision (global::precision),
+        _type (type_), _status (0 |/* CN_UENABLED |*/ s_mask),
+        M (M_),
+        _binwrite_handle (-1), _listener_disk (nullptr), _listener_mem (nullptr)
+{
+        memset( _label, 0, max_label_size);
+        if ( label_.size() )
+                strncpy( _label, label_.c_str(), max_label_size);
+        else
+                snprintf( _label, max_label_size-1, "fafa%p", this);
+
+        if ( M_ && M_->unit_by_label( label_) ) {
+                fprintf( stderr, "Model %s already has a unit labelled \"%s\"\n", M_->name.c_str(), label_.c_str());
+                throw "Duplicate unit label";
+        }
+
+        reset_params();
+        // don't have field idx to do reset_vars() safely
+}
+
+
+
+void
+cnrun::C_BaseUnit::
+reset_state()
+{
+        if ( M )
+                M->vp( 3, stderr, "Resetting \"%s\"\n", _label);
+        reset_vars();
+        if ( is_listening() )
+                restart_listening();
+}
+
+
+int
+cnrun::C_BaseUnit::
+param_idx_by_sym( const string& sym) const
+{
+        for ( size_t i = 0; i < p_no(); ++i )
+                if ( sym == __CNUDT[_type].stock_param_syms[i] )
+                        return i;
+        return -1;
+}
+
+int
+cnrun::C_BaseUnit::
+var_idx_by_sym( const string& sym) const
+{
+        for ( size_t i = 0; i < v_no(); ++i )
+                if ( sym == __CNUDT[_type].stock_var_syms[i] )
+                        return i;
+        return -1;
+}
+
+
+
+
+
+
+
+
+void
+cnrun::C_BaseUnit::
+start_listening( int mask)
+{
+        if ( !M ) {
+                fprintf( stderr, "start_listening() called for an unattached unit \"%s\"\n", _label);
+                return;
+        }
+        if ( _listener_disk || _listener_mem || _binwrite_handle != -1 ) { // listening already; check if user wants us to listen differently
+                if ( (_status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE))
+                     != mask ) {
+                        stop_listening();  // this will nullptrify _listener_{mem,disk}, avoiding recursion
+                        start_listening( mask);
+                        M->vp( 4, stderr, "Unit \"%s\" was already listening\n", _label);
+                }
+                return;  // either already listening as requested, or just restarted with the new mask
+        }
+
+      // deferred write implies a mem listener
+        if ( mask & CN_ULISTENING_DEFERWRITE && !(mask & CN_ULISTENING_MEM) )
+                mask |= CN_ULISTENING_MEM;
+
+        if ( mask & CN_ULISTENING_MEM )
+                _listener_mem = new vector<double>;
+
+        if ( mask & CN_ULISTENING_DISK ) {
+                if ( M->is_diskless )
+                        M->vp( 1, stderr, "Cannot get Unit \"%s\" to listen to disk in a diskless model\n", _label);
+                else {
+                        _listener_disk = new ofstream( (string(_label)+".var").c_str(), ios_base::trunc);
+                        _listener_disk->precision( precision);
+
+                        *_listener_disk << "# " << _label << " variables\n#<time>";
+                        if ( mask & CN_ULISTENING_1VARONLY )
+                                *_listener_disk << "\t<" << var_sym(0) << ">";
+                        else
+                                for ( size_t v = 0; v < v_no(); ++v )
+                                        *_listener_disk << "\t<" << var_sym(v) << ">";
+                        *_listener_disk << endl;
+                        M->vp( 4, stderr, "Unit \"%s\" now listening\n", _label);
+                }
+        }
+
+        if ( mask & CN_ULISTENING_BINARY )
+                _binwrite_handle = open( (string(_label)+".varx").c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR);
+
+        _status |= (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY |
+                            CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE));
+
+      // inform the model
+        M->register_listener( this);
+}
+
+
+void
+cnrun::C_BaseUnit::
+stop_listening()
+{
+      // do deferred write
+        if ( _status & CN_ULISTENING_DEFERWRITE && _listener_mem ) {
+                if ( _listener_disk ) {
+                        for ( auto mI = _listener_mem->begin(); mI != _listener_mem->end(); ) {
+                                *_listener_disk << *(mI++);
+                                if ( _status & CN_ULISTENING_1VARONLY )
+                                        *_listener_disk << "\t" << *(mI++);
+                                else
+                                        for ( size_t v = 0; v < v_no(); ++v )
+                                                *_listener_disk << "\t" << *(mI++);
+                                *_listener_disk << endl;
+                        }
+                }
+                if ( _binwrite_handle != -1 )
+                        if ( write( _binwrite_handle, _listener_mem->data(),
+                                    sizeof(double) * _listener_mem->size()) < 1 )
+                                M->vp( 0, stderr, "write() failed on \"%s.varx\"\n", _label);
+        }
+
+        if ( _listener_mem ) {
+                delete _listener_mem;
+                _listener_mem = nullptr;
+        }
+
+        if ( _listener_disk ) {
+                _listener_disk->close();
+                delete _listener_disk;
+                _listener_disk = nullptr;
+        }
+
+        if ( _binwrite_handle != -1 ) {
+                close( _binwrite_handle);
+                _binwrite_handle = -1;
+        }
+
+        _status &= ~(CN_ULISTENING_MEM | CN_ULISTENING_DISK | CN_ULISTENING_BINARY);
+
+        if ( M ) {
+                M->unregister_listener( this);
+                M->vp( 4, stderr, "Unit \"%s\" not listening now\n", _label);
+        }
+
+}
+
+
+
+
+void
+cnrun::C_BaseUnit::
+tell()
+{
+        if ( _binwrite_handle != -1 && !(_status & CN_ULISTENING_DEFERWRITE) ) {
+                if ( write( _binwrite_handle, &M->V[0], sizeof(double)) < 1 ||
+                     write( _binwrite_handle, &var_value(0),
+                            sizeof(double) * ((_status & CN_ULISTENING_1VARONLY) ? 1 : v_no())) < 1 )
+                        M->vp( 0, stderr, "write() failed in tell() for \"%s\"\n", _label);
+        }
+
+        if ( _listener_disk && !(_status & CN_ULISTENING_DEFERWRITE) ) {
+                *_listener_disk << model_time();
+                if ( _status & CN_ULISTENING_1VARONLY )
+                        *_listener_disk << "\t" << var_value(0);
+                else
+                        for ( size_t v = 0; v < v_no(); ++v )
+                                *_listener_disk << "\t" << var_value(v);
+                *_listener_disk << endl;
+        }
+
+        if ( _listener_mem ) {
+//                _listener_mem->push_back( 999);
+                _listener_mem->push_back( model_time());
+                if ( _status & CN_ULISTENING_1VARONLY )
+                        _listener_mem->push_back( var_value(0));
+                else
+                        for ( size_t v = 0; v < v_no(); ++v )
+                                _listener_mem->push_back( var_value(v));
+        }
+}
+
+
+
+
+
+
+void
+cnrun::C_BaseUnit::
+dump( bool with_params, FILE *strm) const
+{
+        fprintf( strm, "[%lu] (%s) \"%s\"\n", _serial_id, species(), _label);
+
+        if ( with_params ) {
+                fprintf( strm, "    Pp: ");
+                for ( size_t p = 0; p < p_no(); ++p )
+                        if ( *param_sym(p) != '.' || M->options.verbosely > 5 )
+                                fprintf( strm, "%s = %g; ", param_sym(p), get_param_value(p));
+                fprintf( strm, "\n");
+        }
+        fprintf( strm, "    Vv: ");
+        for ( size_t v = 0; v < v_no(); ++v )
+                if ( *var_sym(v) != '.' || M->options.verbosely > 5 )
+                        fprintf( strm, "%s = %g; ", var_sym(v), get_var_value(v));
+        fprintf( strm, "\n");
+
+        if ( _sources.size() ) {
+                fprintf( strm, "   has sources:  ");
+                for ( auto &S : _sources )
+                        fprintf( strm, "%s << %s;  ",
+                                 (S.sink_type == SINK_PARAM) ? param_sym(S.idx) : var_sym(S.idx),
+                                 S.source->name());
+                fprintf( strm, "\n");
+        }
+
+        if ( is_listening() ) {
+                fprintf( strm, "   listening to %s%s%s\n",
+                         _listener_mem ? "mem" : "",
+                         _listener_mem && _listener_disk ? ", " : "",
+                         _listener_disk ? "disk" : "");
+        }
+}
+
+
+
+
+
+
+// source interface
+
+void
+cnrun::C_BaseUnit::
+detach_source( C_BaseSource *s, TSinkType sink_type, size_t idx)
+{
+        // list <SSourceInterface<C_BaseSource>>::iterator K;
+        // while ( (K = find( _sources.begin(), _sources.end(),
+        //                    )) != _sources.end() )
+        //         _sources.erase( K);
+        _sources.remove( SSourceInterface<C_BaseSource> (s, sink_type, idx));
+        M->unregister_unit_with_sources( this);
+}
+
+
+void
+cnrun::C_BaseUnit::
+apprise_from_sources()
+{
+        for ( auto &S : _sources )
+                switch ( S.sink_type ) {
+                case SINK_PARAM:
+//                        printf( "apprise_from_sources() for %s{%d} = %g\n", _label, S->idx, (*S->source)( model_time()));
+                        param_value( S.idx) = (*S.source)( model_time());
+                        param_changed_hook();
+                    break;
+                case SINK_VAR:
+                        var_value( S.idx) = (*S.source)( model_time());
+                    break;
+                }
+}
+
+
+cnrun::C_BaseUnit::
+~C_BaseUnit()
+{
+        if ( M )
+                M->vp( 5, "   deleting base unit \"%s\"\n", _label);
+
+        if ( is_listening() ) {
+                stop_listening();
+                if ( M && M->model_time() == 0. )
+                      // nothing has been written yet, delete the files on disk
+                        unlink( (string(_label) + ".var").c_str());
+        }
+        if ( M )
+                M->exclude_unit( this, CModel::TExcludeOption::no_delete);
+}
+
+
+
+
+
+
+// ----- C_BaseNeuron
+
+
+bool
+cnrun::C_BaseNeuron::
+connects_to( const C_BaseNeuron &to) const
+{
+        for ( auto &A : _axonal_harbour )
+                if ( A->has_target( to) )
+                        return true;
+        return false;
+}
+
+cnrun::C_BaseSynapse*
+cnrun::C_BaseNeuron::
+connects_via( const C_BaseNeuron &to,
+              SCleft::mapped_type *g_ptr) const
+{
+        for ( auto &A : _axonal_harbour )
+                if ( A->has_target( to) ) {
+                        if ( g_ptr )
+                                *g_ptr = to._dendrites.at(A);
+                        return A;
+                }
+        if ( g_ptr )
+                *g_ptr = NAN;
+        return nullptr;
+}
+
+
+void
+cnrun::C_BaseNeuron::
+dump( bool with_params, FILE *strm) const
+{
+        C_BaseUnit::dump( with_params, strm);
+        if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_IDLE) )
+                fprintf( strm, "   logging spikes at %g:%g\n", _spikelogger_agent->sample_period, _spikelogger_agent->sigma);
+        fprintf( strm, "\n");
+
+}
+
+
+cnrun::C_BaseNeuron::
+~C_BaseNeuron()
+{
+        if ( M )
+                M->vp( 4, "  deleting base neuron \"%s\"\n", _label);
+
+      // kill all efferents
+        for ( auto Y = _axonal_harbour.rbegin(); Y != _axonal_harbour.rend(); ++Y ) {
+                (*Y) -> _source = nullptr;
+                delete (*Y);
+        }
+      // unlink ourselves from all afferents
+        for ( auto Y = _dendrites.rbegin(); Y != _dendrites.rend(); ++Y )
+                Y->first->_targets.remove( this);
+
+        if ( _spikelogger_agent ) {
+                if ( M && !(_spikelogger_agent->_status & CN_KL_IDLE) )
+                        M->unregister_spikelogger( this);
+                delete _spikelogger_agent;
+                _spikelogger_agent = nullptr;
+        }
+}
+
+
+
+
+// --- SSpikeloggerService
+
+double
+cnrun::SSpikeloggerService::
+sdf( double at, double sample_width, double sigma, size_t *nspikes) const
+{
+        if ( nspikes )
+                *nspikes = 0;
+
+        double  dt,
+                result = 0.;
+        for ( auto &T : spike_history ) {
+                dt = T - at;
+                if ( dt < -sample_width/2. )
+                        continue;
+                if ( dt >  sample_width/2. )
+                        break;
+                if ( nspikes )
+                        ++(*nspikes);
+                result += exp( -dt*dt/(sigma * sigma));
+        }
+        return result;
+}
+
+
+double
+cnrun::SSpikeloggerService::
+shf( double at, double sample_width) const
+{
+        double  dt,
+                last_spike_at;
+        vector<double>
+                intervals;
+        bool    counted_one = false;
+        for ( auto &T : spike_history ) {
+                dt = T - at;
+                if ( dt < -sample_width/2. )
+                        continue;
+                if ( dt >  sample_width/2. )
+                        break;
+
+                if ( counted_one )
+                        intervals.push_back( last_spike_at - T);
+                else
+                        counted_one = true;
+
+                last_spike_at = T;
+        }
+
+        return (intervals.size() < 3)
+                ? 0
+                : gsl_stats_sd( intervals.data(), 1, intervals.size());
+}
+
+
+size_t
+cnrun::SSpikeloggerService::
+get_sxf_vector_custom( vector<double> *sdf_buffer, vector<double> *shf_buffer,
+                       vector<size_t> *nspikes_buffer,
+                       double sample_period_custom, double sigma_custom,
+                       double from, double to) const
+{
+        if ( to == 0. )
+                to = _client->M->model_time();
+
+        if ( sdf_buffer )
+                sdf_buffer->clear();
+        if ( shf_buffer )
+                shf_buffer->clear();
+        if ( nspikes_buffer )
+                nspikes_buffer->clear();
+
+        for ( double t = from; t <= to; t += sample_period_custom ) {
+                size_t  nspikes = 0;
+                double  sdf_value = sdf(
+                        t, sample_period_custom,
+                        sigma_custom, &nspikes);
+                if ( sdf_buffer )
+                        sdf_buffer->push_back( sdf_value);
+                if ( shf_buffer )
+                        shf_buffer->push_back( shf( t, sample_period_custom));
+                if ( nspikes_buffer )
+                        nspikes_buffer->push_back( nspikes);
+        }
+
+        return (to - from) / sample_period_custom;
+}
+
+
+void
+cnrun::SSpikeloggerService::
+sync_history() const
+{
+        if ( !_client->M || _client->M->is_diskless )
+                return;
+
+        ofstream spikecnt_strm( (string(_client->_label) + ".spikes").c_str());
+        spikecnt_strm.precision( _client->precision);
+        spikecnt_strm << "#spike time\n";
+
+        for ( auto &V : spike_history )
+                spikecnt_strm << V << endl;
+
+        if ( _status & CN_KL_COMPUTESDF ) {
+                ofstream sdf_strm( (string(_client->_label) + ".sxf").c_str());
+                sdf_strm.precision( _client->precision);
+                sdf_strm << "#<time>\t<sdf>\t<shf>\t<nspikes>\n";
+
+                vector<double> sdf_vector, shf_vector;
+                vector<size_t> nspikes_vector;
+                get_sxf_vector( &sdf_vector, &shf_vector, &nspikes_vector,
+                                start_delay, 0);
+
+                double t = start_delay;
+                for ( size_t i = 0; i < sdf_vector.size(); ++i, t += sample_period )
+                        sdf_strm << t << "\t"
+                                 << sdf_vector[i] << "\t"
+                                 << shf_vector[i] << "\t"
+                                 << nspikes_vector[i] << endl;
+        }
+}
+
+
+size_t
+cnrun::SSpikeloggerService::
+n_spikes_since( double since) const
+{
+        size_t i = 0;
+        for ( auto &K : spike_history ) {
+                if ( K > since )
+                        return spike_history.size() - i;
+                ++i;
+        }
+        return 0;
+}
+
+
+
+// ----- CSynapse
+
+cnrun::C_BaseSynapse::
+C_BaseSynapse( TUnitType intype,
+               C_BaseNeuron *insource, C_BaseNeuron *intarget,
+               double ing, CModel *inM, int s_mask)
+      : C_BaseUnit (intype, "overwrite-me", inM, s_mask),
+        _source (insource),
+        t_last_release_started (-INFINITY)
+{
+        if ( M )
+                M->vp( 5, "Creating a \"%s\" base synapse\n", species());
+        _targets.push_back( intarget);
+        intarget->_dendrites[this] = ing;
+        _source->_axonal_harbour.push_back( this);
+        snprintf( _label, max_label_size-1, "%s:1", _source->_label);
+}
+
+
+
+
+
+
+cnrun::C_BaseSynapse*
+cnrun::C_BaseSynapse::
+clone_to_target( C_BaseNeuron *tgt, double g)
+{
+      // bail out if we already have a connection to tgt
+        if ( member( tgt, _targets) ) {
+                M->vp( 1, stderr, "Neuron \"%s\" already synapsing onto \"%s\"\n",
+                       _source->_label, tgt->_label);
+                return nullptr;
+        }
+
+        tgt -> _dendrites[this] = g;
+        _targets.push_back( tgt);
+
+        snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
+
+        return this;
+}
+
+
+
+
+cnrun::C_BaseSynapse*
+cnrun::C_BaseSynapse::
+make_clone_independent( C_BaseNeuron *tgt)
+{
+        double g = g_on_target( *tgt);
+        if ( !isfinite(g) || !M )
+                return nullptr;
+
+        M->vp( 4, "promoting a clone of %s synapse from \"%s\" to \"%s\"\n",
+               species(), _label, tgt->_label);
+        // if ( unlikely (member( tgt, _targets)) )
+        //         fprintf( stderr, "what the hell!\n");
+        _targets.remove( tgt);
+
+        // if ( unlikely (member( this, tgt->_dendrites)) )
+        //         fprintf( stderr, "what the bloody hell!\n");
+        tgt -> _dendrites.erase( this);
+
+        snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
+
+        C_BaseSynapse* ret = M -> add_synapse_species(
+                _type, _source, tgt, g,
+                CModel::TSynapseCloningOption::no /* prevents re-creation of a clone we have just excised */,
+                TIncludeOption::is_last);
+        // the newly added synapse still has stock parameters: copy ours
+        if ( ret ) {
+                ret->P = P;
+                // also see to vars
+                for ( size_t i = 0; i < v_no(); ++i )
+                        ret->var_value(i) = get_var_value(i);
+                return ret;
+        }
+        return nullptr;
+}
+
+
+
+
+
+
+void
+cnrun::C_BaseSynapse::
+dump( bool with_params, FILE *strm) const
+{
+        C_BaseUnit::dump( with_params, strm);
+        fprintf( strm, "  gsyn on targets (%zu):  ", _targets.size());
+        for ( auto &T : _targets )
+                fprintf( strm, "%s: %g;  ", T->_label, g_on_target( *T));
+        fprintf( strm, "\n\n");
+}
+
+
+
+
+
+cnrun::C_BaseSynapse::
+~C_BaseSynapse()
+{
+        if ( M )
+                M->vp( 4, "  deleting base synapse \"%s\"\n", _label);
+
+        for ( auto &T : _targets )
+                if ( T )
+                        T->_dendrites.erase( this);
+
+        if ( _source ) {
+                _source->_axonal_harbour.remove( this);
+                if ( M )
+                        M->vp( 5, "    removing ourselves from \"%s\" axonals (%zu still there)\n",
+                               _source->_label, _source->_axonal_harbour.size());
+        }
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
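The CN_ULISTENING_DEFERWRITE path above trades per-step disk writes for a
single flush in stop_listening(): tell() only appends to _listener_mem, and
the buffer is drained in one pass when listening stops. A reduced sketch of
the same pattern, under hypothetical names:

    #include <fstream>
    #include <vector>

    struct DeferredLogger {
            std::vector<double> buf;   // interleaved samples: t, v, t, v, ...
            void tell( double t, double v)
                    {  buf.push_back( t);  buf.push_back( v);  }
            void flush( const char *fname)  // one write pass, at the very end
            {
                    std::ofstream out (fname);
                    for ( size_t i = 0; i + 1 < buf.size(); i += 2 )
                            out << buf[i] << '\t' << buf[i+1] << '\n';
            }
    };

    int main()
    {
            DeferredLogger L;
            for ( int i = 0; i < 5; ++i )
                    L.tell( i * 0.1, double(i * i));
            L.flush( "unit.var");
            return 0;
    }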
diff --git a/upstream/src/libcnrun/base-unit.hh b/upstream/src/libcnrun/base-unit.hh
new file mode 100644
index 0000000..21bfbd4
--- /dev/null
+++ b/upstream/src/libcnrun/base-unit.hh
@@ -0,0 +1,293 @@
+/*
+ *       File name:  libcn/base-unit.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  unit base class
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_BASEUNIT_H_
+#define CNRUN_LIBCN_BASEUNIT_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <fstream>
+#include <cstring>
+#include <vector>
+#include <list>
+
+#include "libstilton/lang.hh"
+#include "libstilton/string.hh"
+#include "forward-decls.hh"
+#include "types.hh"
+#include "sources.hh"
+
+
+using namespace std;
+using cnrun::stilton::str::sasprintf;
+
+namespace cnrun {
+
+namespace global {
+extern unsigned short precision;
+extern int verbosely;
+}
+
+// for all units
+#define CN_UERROR                        (1 << 0)
+#define CN_UOWNED                        (1 << 1)
+#define CN_UHASPARAMRANGE                (1 << 2)
+#define CN_ULISTENING_MEM                (1 << 3)
+#define CN_ULISTENING_DISK               (1 << 4)
+#define CN_ULISTENING_1VARONLY           (1 << 5)
+#define CN_ULISTENING_DEFERWRITE         (1 << 6)
+#define CN_ULISTENING_BINARY             (1 << 7)
+//#define CN_NDYNPARAMS                        (1 << 8)
+
+// only for neurons
+#define CN_NFIRING                       (1 <<  9)  // firing now
+#define CN_NREFRACT                      (1 << 10)  // in refractory phase now
+
+
+// the base unit provides the methods for the following:
+// * classification;
+// * access to parameters, tape reader and range interface;
+// * attachment to the mother model;
+// * listening, i.e., keeping a history of vars along a timeline;
+class C_BaseUnit {
+
+        DELETE_DEFAULT_METHODS (C_BaseUnit)
+
+        friend class CModel;
+        friend class SSpikeloggerService;
+
+    public:
+        static const constexpr size_t max_label_size = 40;
+
+    protected:
+        C_BaseUnit (TUnitType, const string& label,
+                    CModel*, int s_mask);
+    public:
+        virtual ~C_BaseUnit();  // surely virtual
+
+      // written variables precision
+        unsigned short precision;
+
+        int     status() const  {  return _status; }
+        TUnitType type() const  {  return _type;   }
+
+      // classification
+        int  traits()        const {  return __CNUDT[_type].traits;                  }
+        bool is_hostable()   const {  return __CNUDT[_type].traits & UT_HOSTED;      }
+        bool is_ddtbound()   const {  return __CNUDT[_type].traits & UT_DDTSET;      }
+        bool is_neuron()     const {  return _type >= NT_FIRST && _type <= NT_LAST;  }
+        bool is_synapse()    const {  return _type >= YT_FIRST && _type <= YT_LAST;  }
+        bool is_oscillator() const {  return __CNUDT[_type].traits & UT_OSCILLATOR;  }
+        bool is_conscious()  const {  return is_oscillator();                        }
+
+        unsigned long serial() const
+                {  return _serial_id;  }
+        const char *label() const  // for synapses, it is "%s:%zu", src->label, targets.size()
+                {  return _label;  }
+        void set_label( const string& new_label)
+                {  strncpy( _label, new_label.c_str(), max_label_size-1); }
+
+        const char *class_name() const
+                {  return is_neuron() ? "Neuron" : "Synapse";  }
+        const char *species() const
+                {  return __CNUDT[_type].species;              }
+        const char *family() const
+                {  return __CNUDT[_type].family;               }
+        const char *type_description() const
+                {  return __CNUDT[_type].description;          }
+
+      // parent model
+        const CModel&
+        parent_model() const        { return *M; }
+        double
+        model_time() const;  // defined in model.h
+
+        bool is_owned() const       { return _status & CN_UOWNED; }
+
+      // parameter & variable names and symbols
+        const char *const param_name( size_t i)       const { return __CNUDT[_type].stock_param_names[i]; }
+        const char *const param_sym( size_t i)        const { return __CNUDT[_type].stock_param_syms[i];  }
+        int param_idx_by_sym( const string&) const __attribute__ ((pure));
+
+        const char *const var_name( size_t i)         const { return __CNUDT[_type].stock_var_names[i];   }
+        const char *const var_sym( size_t i)          const { return __CNUDT[_type].stock_var_syms[i];    }
+        int var_idx_by_sym( const string&) const __attribute__ ((pure));
+
+        unsigned short v_no() const        { return __CNUDT[_type].vno; }
+        unsigned short p_no() const        { return __CNUDT[_type].pno; }
+
+      // purity checks
+        bool is_not_altered() const
+                {
+                        return (memcmp( P.data(), __CNUDT[_type].stock_param_values,
+                                       sizeof (double) * p_no()) == 0) &&
+                                !has_sources();
+                }
+        bool has_same_params( const C_BaseUnit &rv) const
+                {
+                        return _type == rv._type &&
+                                memcmp( P.data(), rv.P.data(), sizeof (double) * p_no()) == 0;
+                }
+        bool has_sources() const __attribute__ ((pure))
+                {
+                        return not _sources.empty();
+                }
+        bool has_same_sources( const C_BaseUnit &rv) const __attribute__ ((pure))
+                {
+                        return _sources == rv._sources;
+                        // not sure whether the order of otherwise identical sources should matter
+                }
+        bool is_identical( const C_BaseUnit &rv) const __attribute__ ((pure))
+                {
+                        return _type == rv._type && has_same_params(rv) &&
+                                ((has_sources() && has_same_sources(rv)) ||
+                                 (!has_sources() && !rv.has_sources()));
+                }
+
+      // parameters
+        double
+        get_param_value( size_t p) const
+                {  return P[p];  }
+
+        double
+        get_param_value( const string& sym) const
+                {
+                        int id = param_idx_by_sym( sym);
+                        if ( unlikely (id == -1) )
+                                throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"", sym.c_str(), _label);
+                        return P[id];
+                }
+
+        double&
+        param_value( size_t p)
+                {
+                        return P[p];
+                }
+
+        double&
+        param_value( const string& sym)
+                {
+                        int id = param_idx_by_sym( sym);
+                        if ( unlikely (id == -1) )
+                                throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"",
+                                                 sym.c_str(), _label);
+                        return P[id];
+                }
+
+        void
+        reset_params()
+                {
+                        P.resize( p_no());
+                        memcpy( P.data(), __CNUDT[_type].stock_param_values,
+                                sizeof(double) * p_no());
+                        param_changed_hook();
+                }
+
+      // variables: differs per hosted or standalone
+        virtual double &var_value( size_t) = 0;
+        virtual const double &get_var_value( size_t) const = 0;
+        virtual void reset_vars() = 0;
+        virtual void reset_state();
+
+        virtual void dump( bool with_params = false, FILE *strm = stdout) const;
+
+      // state history
+        bool is_listening() const
+                {
+                        return _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM);
+                }
+        void start_listening( int mask = 0 | CN_ULISTENING_DISK);
+        void stop_listening();
+        void restart_listening()
+                {
+                        int lbits = _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM
+                                               | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE);
+                        stop_listening();
+                        start_listening( lbits);
+                }
+        void pause_listening();
+        void resume_listening();
+
+        void tell();
+
+        const vector<double>*
+        listener_mem() const
+                { return _listener_mem; }
+
+      // source interface
+        enum TSinkType { SINK_PARAM, SINK_VAR };
+
+        template <class T>
+        struct SSourceInterface {
+            friend class C_BaseUnit;
+            friend class CModel;
+            private:
+                C_BaseSource *source;
+                TSinkType sink_type;
+                unsigned short idx;
+
+                SSourceInterface (T *insource, TSinkType insink_type, unsigned short inidx)
+                      : source (insource), sink_type (insink_type), idx (inidx)
+                        {}
+            public:
+                bool operator== ( const SSourceInterface &rv) const
+                        {
+                                return  source    == rv.source &&
+                                        sink_type == rv.sink_type &&
+                                        idx       == rv.idx;
+                        }
+        };
+        template <class T>
+        void attach_source( T *s, TSinkType t, unsigned short idx);
+        void detach_source( C_BaseSource*, TSinkType, size_t idx);
+
+        void apprise_from_sources();
+        virtual void param_changed_hook()
+                {}
+
+    protected:
+        TUnitType
+                _type;  // will look up p, pno and vno from __CNUDT using _type as index
+        int     _status;
+
+        unsigned long
+                _serial_id;  // assigned incrementally as read by import_NetworkML
+        char    _label[max_label_size];
+
+        CModel  *M;
+
+      // private copy of params
+        vector<double> P;
+
+        list<SSourceInterface<C_BaseSource>>
+                _sources;
+
+    private:
+      // where vars are written by tell()
+        int _binwrite_handle;
+        ofstream *_listener_disk;
+      // ... and/or stored, in a diskless model
+        vector<double> *_listener_mem;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
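attach_source() binds an external signal generator to one parameter or
variable slot; apprise_from_sources() then samples every bound source at the
current model time (see base-unit.cc). A toy version of that polling loop,
with a std::function standing in for C_BaseSource:

    #include <cstdio>
    #include <functional>
    #include <list>
    #include <vector>

    using Source = std::function<double(double)>;  // stand-in for C_BaseSource

    enum TSinkType { SINK_PARAM, SINK_VAR };

    struct SourceInterface {
            Source    source;
            TSinkType sink_type;
            size_t    idx;
    };

    struct Unit {
            std::vector<double> P {1.0};  // one parameter
            std::vector<double> V {0.0};  // one variable
            std::list<SourceInterface> sources;

            void apprise_from_sources( double now)
            {
                    for ( auto &S : sources )
                            (S.sink_type == SINK_PARAM ? P : V)[S.idx] = S.source( now);
            }
    };

    int main()
    {
            Unit u;
            u.sources.push_back( { []( double t) { return 0.5 * t; }, SINK_PARAM, 0 });
            u.apprise_from_sources( 10.);
            std::printf( "P[0] = %g\n", u.P[0]);  // prints: P[0] = 5
            return 0;
    }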
diff --git a/upstream/src/libcnrun/forward-decls.hh b/upstream/src/libcnrun/forward-decls.hh
new file mode 100644
index 0000000..07ae859
--- /dev/null
+++ b/upstream/src/libcnrun/forward-decls.hh
@@ -0,0 +1,42 @@
+/*
+ *       File name:  libcn/forward-decls.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2014-09-16
+ *
+ *         Purpose:  forward declarations
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_FORWARDDECLS_H_
+#define CNRUN_LIBCN_FORWARDDECLS_H_
+
+namespace cnrun {
+
+class C_BaseUnit;
+class C_BaseNeuron;
+class C_BaseSynapse;
+class C_HostedNeuron;
+class C_HostedSynapse;
+class C_StandaloneNeuron;
+class C_StandaloneSynapse;
+
+class C_HostedConductanceBasedNeuron;
+class C_HostedRateBasedNeuron;
+
+class CNeuronMap;
+class CSynapseMap;
+
+class CModel;
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/hosted-attr.hh b/upstream/src/libcnrun/hosted-attr.hh
new file mode 100644
index 0000000..84cc6f5
--- /dev/null
+++ b/upstream/src/libcnrun/hosted-attr.hh
@@ -0,0 +1,56 @@
+/*
+ *       File name:  libcn/hosted-attr.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-03-31
+ *
+ *         Purpose:  Interface class containing hosted unit attributes.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_HOSTEDATTR_H_
+#define CNRUN_LIBCN_HOSTEDATTR_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include "libstilton/lang.hh"
+#include <vector>
+
+
+using namespace std;
+
+namespace cnrun {
+
+class C_HostedAttributes {
+
+        friend class CIntegrateRK65;
+        friend class CModel;
+
+    protected:
+      // variables for units in the model are concatenated on a single
+      // vector<double>, as an essential optimization measure; each
+      // unit knows its own set of variables by this idx:
+        size_t idx;
+      // the containing model assigns idx when our unit is registered
+
+    public:
+        virtual void reset_vars() = 0;
+        virtual double &var_value( size_t) = 0;
+
+        virtual void derivative( vector<double>&, vector<double>&) = 0;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
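A sketch of the idx scheme described above: the model owns one flat vector of
state variables, hands each hosted unit a base offset when the unit is
registered, and the unit addresses its own slice through that offset
(hypothetical names, not the library's API):

    #include <cstdio>
    #include <vector>

    struct Model {
            std::vector<double> V;  // all hosted units' variables, concatenated
            size_t register_unit( size_t v_no)
            {
                    size_t idx = V.size();   // this unit's base offset
                    V.resize( V.size() + v_no);
                    return idx;
            }
    };

    struct HostedUnit {
            Model  *M;
            size_t  idx, v_no;
            double& var_value( size_t v)  { return M->V[idx + v]; }
    };

    int main()
    {
            Model m;
            HostedUnit a {&m, m.register_unit( 4), 4};  // e.g. an HH neuron: E, m, h, n
            HostedUnit b {&m, m.register_unit( 1), 1};
            a.var_value( 0) = -66.81;
            b.var_value( 0) = 0.5;
            std::printf( "%zu vars on the model vector; a.E = %g\n",
                         m.V.size(), a.var_value( 0));
            return 0;
    }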
diff --git a/upstream/src/libcnrun/hosted-neurons.cc b/upstream/src/libcnrun/hosted-neurons.cc
new file mode 100644
index 0000000..acb7540
--- /dev/null
+++ b/upstream/src/libcnrun/hosted-neurons.cc
@@ -0,0 +1,766 @@
+/*
+ *       File name:  libcn/hosted-neurons.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-10-16
+ *
+ *         Purpose:  hosted neuron classes (those having their
+ *                   state vars on parent model's integration vectors)
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <cmath>
+#include <iostream>
+
+#include "libstilton/lang.hh"
+
+#include "types.hh"
+#include "model.hh"
+
+
+cnrun::C_HostedNeuron::
+C_HostedNeuron (TUnitType intype, const string& inlabel,
+                double inx, double iny, double inz,
+                CModel* inM, int s_mask,
+                TIncludeOption include_option)
+      : C_BaseNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
+{
+        if ( M )
+                M->include_unit( this, include_option);
+        else {
+//                _status &= ~CN_UENABLED;
+                idx = (unsigned long)-1;
+        }
+}
+
+
+
+
+
+void
+cnrun::C_HostedConductanceBasedNeuron::
+do_detect_spike_or_whatever()
+{
+        if ( unlikely (E() >= M->options.spike_threshold) ) {
+                if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) ) {
+                        _spikelogger_agent->spike_history.push_back(
+                                _spikelogger_agent->t_last_spike_start = model_time());
+                        _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+                }
+        } else
+//                if ( model_time() - t_last_spike_end > M->spike_lapse ) {
+                if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
+                        _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+                        _spikelogger_agent->t_last_spike_end = model_time();
+                }
+}
+
+
+
+
+
+
+
+
+// SPECIFIC NEURONS:
+
+// ===== HH and variations
+
+const char* const cnrun::CN_ParamNames_NeuronHH_d[] = {
+        "Na conductance, " CN_PU_CONDUCTANCE,
+        "Na equi potential, " CN_PU_POTENTIAL,
+        "K conductance, " CN_PU_CONDUCTANCE,
+        "K equi potential, " CN_PU_POTENTIAL,
+        "Leak conductance, " CN_PU_CONDUCTANCE,
+        "Leak equi potential, " CN_PU_POTENTIAL,
+        "Membrane specific capacitance, " CN_PU_CAPACITY_DENSITY,
+
+        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
+        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
+        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
+
+        "Externally applied DC, " CN_PU_CURRENT,
+};
+const char* const cnrun::CN_ParamSyms_NeuronHH_d[] = {
+        "gNa",
+        "ENa",
+        "gK",
+        "EK",
+        "gl",
+        "El",
+        "Cmem",
+
+        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
+        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
+        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
+
+        "Idc",
+};
+const double cnrun::CN_Params_NeuronHH_d[] = {
+        7.15,   //   gNa: Na conductance in 1/(mOhms * cm^2)
+       50.0,    //   ENa: Na equi potential in mV
+        1.430,  //   gK: K conductance in 1/(mOhms * cm^2)
+      -95.0,    //   EK: K equi potential in mV
+        0.0267, //   gl: leak conductance in 1/(mOhms * cm^2)
+      -63.563,  //   El: leak equi potential in mV
+        0.143,  //   Cmem: membr. specific capacitance, muF/cm^2
+
+        0.32,   52.,   4.,
+        0.28,   25.,   5.,
+        0.128,  48.,  18.,
+        4.0,    25.,   5.,
+        0.032,  50.,   5.,
+        0.5,    55.,  40.,
+
+          0.                // Externally applied constant current
+};
+
+
+
+
+const double cnrun::CN_Vars_NeuronHH_d[] = {
+        -66.81,         // 0 - membrane potential E
+          0.023,        // 1 - prob. for Na channel activation m
+          0.800,        // 2 - prob. of the Na channel blocking particle being absent, h
+          0.220,        // 3 - prob. for K channel activation n
+};
+
+const char* const cnrun::CN_VarNames_NeuronHH_d[] = {
+        "Membrane potential, " CN_PU_POTENTIAL,
+        "Prob. of Na channel activation",
+        "1-Prob. of Na channel blocking",
+        "Prob. of K channel activation",
+};
+const char* const cnrun::CN_VarSyms_NeuronHH_d[] = {
+        "E",
+        ".m",
+        ".h",
+        ".n"
+};
+
+
+
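+// The gating rates computed below each follow one of three standard forms,
+// with the per-gate (a, b, c) triples taken from the parameter table above
+// and K as set inline:
+//   alpha_m, beta_m, alpha_n:  a * K / expm1( K / c)
+//   alpha_h, beta_n:           a * exp( K / c)
+//   beta_h:                    a / (exp( K / c) + 1)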
+void
+__attribute__ ((hot))
+cnrun::CNeuronHH_d::
+derivative( vector<double>& x, vector<double>& dx)
+{
+      // differential eqn for E, the membrane potential
+        dE(dx) = (
+                     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
+                   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
+                   + P[gl]                           * (P[El]  - E(x)) + (Isyn(x) + P[Idc])
+                  ) / P[Cmem];
+
+        double _a, _b, K;
+      // differential eqn for m, the probability for one Na channel activation
+      // particle
+        K = -P[alpha_m_b] - E(x),
+                _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
+//        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+        K =  P[beta_m_b] + E(x),
+                _b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
+//        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+        dm(dx) = _a * (1 - m(x)) - _b * m(x);
+
+      // differential eqn for h, the probability for the Na channel blocking
+      // particle to be absent
+        K = -P[alpha_h_b] - E(x),
+                _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
+//        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+        K = -P[beta_h_b] - E(x),
+                _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
+//        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+        dh(dx) = _a * (1 - h(x)) - _b * h(x);
+
+      // differential eqn for n, the probability for one K channel activation
+      // particle
+        K = -P[alpha_n_b] - E(x),
+                _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
+//        _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
+        K = -P[beta_n_b] - E(x),
+                _b = P[beta_n_a] * exp( K / P[beta_n_c]);
+//        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+        dn(dx) = _a * (1 - n(x)) - _b * n(x);
+}
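+
+// For reference, the system integrated above (a restatement of the code,
+// not a normative statement of the HH equations):
+//
+//   dE/dt = ( gNa m^3 h (ENa - E) + gK n^4 (EK - E)
+//             + gl (El - E) + Isyn + Idc ) / Cmem
+//   dX/dt = alpha_X (1 - X) - beta_X X,   for X in {m, h, n}
+//
+// with each rate of the form a*K/expm1(K/c) or a*exp(K/c), K = -(b + E)
+// (or b + E for beta_m); expm1() avoids cancellation in the denominator
+// for small K.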
+
+// void
+// CNeuronHH::derivative( vector<double>& x, vector<double>& dx)
+// {
+//        enum TParametersNeuronHH {
+//                gNa, ENa, gK,  EK, gl, El, Cmem, Idc
+//        };
+
+//       // differential eqn for E, the membrane potential
+//        dE(dx) = (
+//                   P[gNa] * ___pow3(m(x)) * h(x) * (P[ENa] - E(x))
+//                 + P[gK]  * ___pow4(n(x))        * (P[EK]  - E(x))
+//                 + P[gl]  *                        (P[El]  - E(x))  + (Isyn(x) + P[Idc])
+//                 ) / P[Cmem];
+
+//        double _a, _b;
+//       // differential eqn for m, the probability for Na channel activation
+//        _a = (3.5 + 0.1 * E(x)) / -expm1( -3.5 - 0.1 * E(x));
+//        _b = 4.0 * exp( -(E(x) + 60.0) / 18.0);
+//        dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
+
+//       // differential eqn for h, the probability for Na channel inactivation
+//        _a = 0.07 * exp( -E(x) / 20.0 - 3.0);
+//        _b = 1.0 / (exp( -3.0 - 0.1 * E(x)) + 1.0);
+//        dh(dx) = _a * (1.0 - h(x)) -_b * h(x);
+
+//       // differential eqn for n, the probability for K channel activation
+//        _a = (-0.5 - 0.01 * E(x)) / expm1( -5.0 - 0.1 * E(x));
+//        _b = 0.125 * exp( -(E(x) + 60.0) / 80.0);
+//        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+// }
+
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_NeuronHH2_d[] = {
+        "Na conductance, " CN_PU_CONDUCTANCE,
+        "Na equi potential, " CN_PU_POTENTIAL,
+        "K conductance, " CN_PU_CONDUCTANCE,
+        "K equi potential, " CN_PU_POTENTIAL,
+        "Leak conductance, " CN_PU_CONDUCTANCE,
+        "Leak equi potential, " CN_PU_POTENTIAL,
+        "Membrane specific capacitance, " CN_PU_CAPACITY_DENSITY,
+        "K leakage conductance, " CN_PU_CONDUCTANCE,
+        "K leakage equi potential, " CN_PU_POTENTIAL,
+
+        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
+        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
+        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
+
+//        "Total equi potential (?), " CN_PU_POTENTIAL,
+
+        "Externally applied DC, " CN_PU_CURRENT,
+};
+const char* const cnrun::CN_ParamSyms_NeuronHH2_d[] = {
+        "gNa",
+        "ENa",
+        "gK",
+        "EK",
+        "gl",
+        "El",
+        "Cmem",
+        "gKl",
+        "EKl",
+
+        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
+        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
+        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
+
+//        "V0",
+
+        "Idc",
+};
+const double cnrun::CN_Params_NeuronHH2_d[] = {
+        7.15,    //   gNa: Na conductance in 1/(mOhms * cm^2)
+       50.0,     //   ENa: Na equi potential in mV
+        1.43,    //   gK: K conductance in 1/(mOhms * cm^2)
+      -95.0,     //   EK: K equi potential in mV
+        0.0267,  //   gl: leak conductance in 1/(mOhms * cm^2)
+      -63.56,    //   El: leak equi potential in mV
+        0.143,   //   Cmem: membr. specific capacitance, muF/cm^2
+        0.00572, //   gKl: potassium leakage conductivity
+      -95.0,     //   EKl: potassium leakage equi pot in mV
+
+        0.32,   52.,   4.,
+        0.28,   25.,   5.,
+        0.128,  48.,  18.,
+        4.0,    25.,   5.,
+        0.032,  50.,   5.,
+        0.5,    55.,  40.,
+
+//       65.0,                //   V0: ~ total equi potential (?)
+
+        0.,                //   Idc: constant, externally applied current
+};
+
+
+const double cnrun::CN_Vars_NeuronHH2_d[] = {
+// as in a single-neuron run
+      -66.56,   // 0 - membrane potential E
+        0.0217, // 1 - prob. for Na channel activation m
+        0.993,  // 2 - prob. for not Na channel blocking h
+        0.051,  // 3 - prob. for K channel activation n
+
+// previously thought to be resting state values
+//      -60.0,              // 0 - membrane potential E
+//        0.0529324,        // 1 - prob. for Na channel activation m
+//        0.3176767,        // 2 - prob. for not Na channel blocking h
+//        0.5961207,        // 3 - prob. for K channel activation n
+};
+
+
+
+
+
+void
+cnrun::CNeuronHH2_d::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        enum TParametersNeuronHH2 {
+                gNa, ENa, gK,  EK, gl, El, Cmem,
+                gKl, EKl, //V0,
+                alpha_m_a,        alpha_m_b,        alpha_m_c,
+                beta_m_a,        beta_m_b,        beta_m_c,
+                alpha_h_a,        alpha_h_b,        alpha_h_c,
+                beta_h_a,        beta_h_b,        beta_h_c,
+                alpha_n_a,        alpha_n_b,        alpha_n_c,
+                beta_n_a,        beta_n_b,        beta_n_c,
+                Idc,
+        };
+
+      // differential eqn for E, the membrane potential
+        dE(dx) = (
+                     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
+                   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
+                   + P[gl]                           * (P[El]  - E(x))
+                   + P[gKl]                          * (P[EKl] - E(x)) + (Isyn(x) + P[Idc])
+                  ) / P[Cmem];
+
+        double _a, _b, K;
+      // differential eqn for m, the probability for one Na channel activation
+      // particle
+        K = -P[alpha_m_b] - E(x),
+                _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
+//        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+        K =  P[beta_m_b] + E(x),
+                _b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
+//        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+        dm(dx) = _a * (1 - m(x)) - _b * m(x);
+
+      // differential eqn for h, the probability for the Na channel blocking
+      // particle to be absent
+        K = -P[alpha_h_b] - E(x),
+                _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
+//        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+        K = -P[beta_h_b] - E(x),
+                _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
+//        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+        dh(dx) = _a * (1 - h(x)) - _b * h(x);
+
+      // differential eqn for n, the probability for one K channel activation
+      // particle
+        K = -P[alpha_n_b] - E(x),
+                _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
+//        _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
+        K = -P[beta_n_b] - E(x),
+                _b = P[beta_n_a] * exp( K / P[beta_n_c]);
+//        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+        dn(dx) = _a * (1 - n(x)) - _b * n(x);
+}
+
+
+
+
+
+
+
+
+//#ifdef CN_WANT_MORE_NEURONS
+
+
+const char* const cnrun::CN_ParamNames_NeuronEC_d[] = {
+        "Na conductance, " CN_PU_CONDUCTANCE,
+        "Na equi potential, " CN_PU_POTENTIAL,
+        "K conductance, " CN_PU_CONDUCTANCE,
+        "K equi potential, " CN_PU_POTENTIAL,
+        "Leak conductance, " CN_PU_CONDUCTANCE,
+        "Leak equi potential, " CN_PU_POTENTIAL,
+        "Membrane capacity density, " CN_PU_CAPACITY_DENSITY,
+        "Externally applied DC, " CN_PU_CURRENT,
+        "K leakage conductance, " CN_PU_CONDUCTANCE,
+        "K leakage equi potential, " CN_PU_POTENTIAL,
+        "Total equi potential, " CN_PU_POTENTIAL,
+        "gh1",
+        "gh2",
+        "Vh, " CN_PU_POTENTIAL
+};
+const char* const cnrun::CN_ParamSyms_NeuronEC_d[] = {
+        "gNa",
+        "ENa",
+        "gK",
+        "EK",
+        "gl",
+        "El",
+        "Cmem",
+        "Idc",
+        "gKl",
+        "EKl",
+        "V0",
+        "gh1",
+        "gh2",
+        "Vh"
+};
+const double cnrun::CN_Params_NeuronEC_d[] = {
+        7.15,   //  0 - gNa: Na conductance in 1/(mOhms * cm^2)
+       50.0,    //  1 - ENa: Na equi potential in mV
+        1.43,   //  2 - gK: K conductance in 1/(mOhms * cm^2)
+      -95.0,    //  3 - EK: K equi potential in mV
+        0.021,  //  4 - gl: leak conductance in 1/(mOhms * cm^2)
+      -55.0,    //  5 - El: leak equi potential in mV
+        0.286,  //  6 - Cmem: membr. capacity density in muF/cm^2 // 0.143
+        0.,     //  7 - Externally applied constant current
+        0.035,  //  8 - gKl: potassium leakage conductivity
+      -95.0,    //  9 - EKl: potassium leakage equi pot in mV
+       65.0,    // 10 - V0: ~ total equi potential (?)
+        0.0185, // 11 - gh1 // 1.85
+        0.01,   // 12 - gh2
+      -20.0,    // 13 - Vh
+};
+
+const char* const cnrun::CN_VarNames_NeuronEC_d[] = {
+        "Membrane potential",
+        "Prob. of Na channel activation",
+        "Prob. of not Na channel blocking",
+        "Prob. of K channel activation",
+        "Ih1 activation",
+        "Ih2 activation"
+};
+const char* const cnrun::CN_VarSyms_NeuronEC_d[] = {
+        "E",
+        ".m",
+        ".h",
+        ".n",
+        ".Ih1",
+        ".Ih2"
+};
+const double cnrun::CN_Vars_NeuronEC_d[] = {
+      -64.1251,    // 0 - membrane potential E
+        0.0176331, // 1 - prob. for Na channel activation m
+        0.994931,  // 2 - prob. for not Na channel blocking h
+        0.0433969, // 3 - prob. for K channel activation n
+        0.443961,  // 4 - Ih1 activation
+        0.625308   // 5 - Ih2 activation
+};
+
+
+
+
+#define _xfunc(a,b,k,V)  ((a) * (V) + (b)) / (1.0 - exp(((V)+(b)/(a))/(k)))
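+// _xfunc(a,b,k,V) expands to (a*V + b) / (1 - exp( (V + b/a) / k )); note
+// that numerator and denominator both vanish at V = -b/a.  The a, b, k
+// triples below are taken as given from the upstream EC model.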
+
+void
+cnrun::CNeuronEC_d::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        enum TParametersNeuronEC {
+                gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
+                gKl, EKl, V0,
+                gh1, gh2,
+                Vh
+        };
+
+        double _a, _b;
+      // differential eqn for E, the membrane potential
+        dE(dx) = -(gsl_pow_3( m(x)) * h(x) * P[gNa] * (E(x) - P[ENa]) +
+                 gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
+                 (Ih1(x) * P[gh1] + Ih2(x) * P[gh2]) * (E(x) - P[Vh])+
+                 P[gl] * (E(x) - P[El]) + P[gKl] * (E(x) - P[EKl]) - Isyn(x)) / P[Cmem];
+
+      // differential eqn for m, the probability for one Na channel activation particle
+        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+        dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
+
+      // differential eqn for h, the probability for the Na channel blocking particle to be absent
+        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+        dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
+
+      // differential eqn for n, the probability for one K channel activation particle
+        _a = 0.032 * (15.0 - E(x) - P[V0]) / expm1( (15.0 - E(x) - P[V0]) / 5.0);
+        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+
+      // differential equation for the Ih1 activation variable
+        _a = _xfunc (-2.89e-3, -0.445,  24.02, E(x));
+        _b = _xfunc ( 2.71e-2, -1.024, -17.40, E(x));
+        dIh1(dx) = _a * (1.0 - Ih1(x)) - _b * Ih1(x);
+
+      // differential equation for the Ih2 activation variable
+        _a = _xfunc (-3.18e-3, -0.695,  26.72, E(x));
+        _b = _xfunc ( 2.16e-2, -1.065, -14.25, E(x));
+        dIh2(dx) = _a * (1.0 - Ih2(x)) - _b * Ih2(x);
+}
+
+#undef _xfunc
+
+
+
+
+
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_NeuronECA_d[] = {
+        "Na conductance, " CN_PU_CONDUCTANCE,
+        "Na equi potential, " CN_PU_POTENTIAL,
+        "K conductance, " CN_PU_CONDUCTANCE,
+        "K equi potential, " CN_PU_POTENTIAL,
+        "Leak conductance, " CN_PU_CONDUCTANCE,
+        "Leak equi potential, " CN_PU_POTENTIAL,
+        "Membrane capacity density, " CN_PU_CAPACITY_DENSITY,
+        "Externally applied DC, " CN_PU_CURRENT,
+        "gNap",
+        "gh",
+        "Vh",
+};
+const char* const cnrun::CN_ParamSyms_NeuronECA_d[] = {
+        "gNa",
+        "ENa",
+        "gK",
+        "EK",
+        "gl",
+        "El",
+        "Cmem",
+        "Idc",
+        "gNap",
+        "gh",
+        "Vh",
+};
+const double cnrun::CN_Params_NeuronECA_d[] = {
+        52.0,        //  0 - Na conductance in 1/(mOhms * cm^2)
+        55.0,        //  1 - Na equi potential in mV
+        11.0,        //  2 - K conductance in 1/(mOhms * cm^2)
+       -90.0,        //  3 - K equi potential in mV
+         0.5,        //  4 - Leak conductance in 1/(mOhms * cm^2)
+       -65.0,        //  5 - Leak equi potential in mV
+         1.5,        //  6 - Membr. capacity density in muF/cm^2
+         0.,         //  7 - Externally applied constant current
+         0.5,        //  8 - gNap
+         1.5,        //  9 - gh
+       -20.0,        // 10 - Vh
+};
+
+const char* const cnrun::CN_VarNames_NeuronECA_d[] = {
+        "Membrane potential",
+        "Prob. of Na channel activation",
+        "Prob. of Na channel blocking",
+        "Prob. of K channel activation",
+        "mNap",
+        "Ih1 activation",
+        "Ih2 activation"
+};
+const char* const cnrun::CN_VarSyms_NeuronECA_d[] = {
+        "E",
+        ".m",
+        ".h",
+        ".n",
+        ".mNap",
+        ".Ih1",
+        ".Ih2"
+};
+const double cnrun::CN_Vars_NeuronECA_d[] = {
+      -53.77902178,    // E
+        0.0262406368,  // prob. for Na channel activation m
+        0.9461831106,  // prob. for not Na channel blocking h
+        0.1135915933,  // prob. for K channel activation n
+        0.08109646237, // Nap
+        0.06918464221, // Ih1 activation
+        0.09815937825  // Ih2 activation
+};
+
+
+
+void
+cnrun::CNeuronECA_d::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        enum TParametersNeuronECA {  // lacks TParametersNeuronEC's gKl and EKl, so derives directly from HH
+                gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
+                gNap, gh,
+                Vh
+        };
+
+      // differential eqn for E, the membrane potential
+        dE(dx) = -((gsl_pow_3( m(x)) * h(x) * P[gNa] + P[gNap] * mNap(x)) * (E(x) - P[ENa]) +
+                   gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
+                   P[gh] * (Ih1(x) * 0.65 + Ih2(x) * 0.35) * (E(x) - P[Vh]) +
+                   P[gl] * (E(x) - P[El]) - (Isyn(x) + P[Idc]) + 2.85) / P[Cmem];
+
+        double _a, _b;
+      // differential eqn for m, the probability for one Na channel activation particle
+        _a = -0.1 * (E(x) + 23) / expm1( -0.1 * (E(x) + 23));
+        _b =  4.  * exp( -(E(x) + 48) / 18);
+        dm(dx) = _a * (1. - m(x)) - _b * m(x);
+
+      // differential eqn for h, the probability for the Na channel blocking particle to be absent
+        _a = 0.07 * exp( -(E(x) + 37.0) / 20.0);
+        _b = 1. / (exp( -0.1 * (E(x) + 7.)) + 1.0);
+        dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
+
+      // differential eqn for n, the probability for one K channel activation particle
+        _a = -0.01  * (E(x) + 27) / expm1( -0.1 * (E(x) + 27));
+        _b =  0.125 * exp( -(E(x) + 37) / 80);
+        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+
+        _a = 1. / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
+        _b = exp( -(E(x) + 38) / 6.5) / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
+        dmNap(dx) = _a * (1.0 - mNap(x)) - _b * mNap(x);
+
+      // differential equation for the Ih1 (fast) activation variable
+        _a = 1. / (1 + exp( (E(x) + 79.2) / 9.78));
+        _b = 0.51 / (exp( (E(x) - 1.7) / 10) + exp( -(E(x) + 340) / 52)) + 1;
+        dIh1(dx) = (_a - Ih1(x)) / _b;
+
+      // differential equation for the Ih2 (slow) activation variable
+        _a = 1. / (1 + exp( (E(x) + 71.3) / 7.9));
+        _b = 5.6 / (exp( (E(x) - 1.7) / 14) + exp( -(E(x) + 260) / 43)) + 1;
+        dIh2(dx) = (_a - Ih2(x)) / _b;
+}
+
+
+
+
+// =========== oscillators
+
+const char* const cnrun::CN_ParamNames_OscillatorColpitts[] = {
+        "a",
+        "g",
+        "q",
+        "η"
+};
+const char* const cnrun::CN_ParamSyms_OscillatorColpitts[] = {
+        "a",
+        "g",
+        "q",
+        "eta"
+};
+const double cnrun::CN_Params_OscillatorColpitts[] = {
+        1.0,    // a
+        0.0797, // g
+        0.6898, // q
+        6.2723  // eta
+};
+
+
+const char* const cnrun::CN_VarNames_OscillatorColpitts[] = {
+        "x0",
+        "x1",
+        "x2"
+};
+const char* const cnrun::CN_VarSyms_OscillatorColpitts[] = {
+        "x0",
+        "x1",
+        "x2"
+};
+const double cnrun::CN_Vars_OscillatorColpitts[] = {
+        0.02,
+        0.69,
+       -0.53
+};
+
+
+void
+cnrun::COscillatorColpitts::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        enum TParametersOscilColpitts {
+                a, g, q,
+                eta
+        };
+
+        dx0(dx) =  P[a]   *  x1(x) + Isyn(x);
+        dx1(dx) = -P[g]   * (x0(x) + x2(x)) - P[q] * x1(x);
+        dx2(dx) =  P[eta] * (x1(x) + 1.0 - exp( -x0(x)));
+//        dx[idx  ] =  p[0] *  x[idx+1] + Isyn;
+//        dx[idx+1] = -p[1] * (x[idx  ] + x[idx+2]) - p[2] * x[idx+1];
+//        dx[idx+2] =  p[3] * (x[idx+1] + 1.0 - exp(-x[idx]));
+}
+
+
+
+
+
+
+/*
+
+const char* const CN_ParamNames_OscillatorLV[] = {
+        "Self inhibition",
+};
+const char* const CN_ParamSyms_OscillatorLV[] = {
+        "rho_ii",
+};
+const double CN_Params_OscillatorLV[] = {
+        1.0,        // 0 - rho_ii: "self inhibition"
+};
+
+
+const char* const CN_VarNames_OscillatorLV[] = {
+        "Membrane potential, " CN_PU_POTENTIAL,
+        "Firing rate"
+};
+const char* const CN_VarSyms_OscillatorLV[] = {
+        "E",
+        "fr"
+};
+const double CN_Vars_OscillatorLV[] = {
+        0.,        // 0 - added a place for E
+        0.1        // 1 - firing rate
+};
+
+
+*/
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_OscillatorVdPol[] = {
+        "η",
+        "ω²",
+//        "\317\203"
+};
+const char* const cnrun::CN_ParamSyms_OscillatorVdPol[] = {
+        "eta",
+        "omegasq", // omega^2
+//        "sigma"
+};
+const double cnrun::CN_Params_OscillatorVdPol[] = {
+        1.0,        // eta
+        0.1,        // omega^2
+//        0.0        // noise level
+};
+
+const char* const cnrun::CN_VarNames_OscillatorVdPol[] = {
+        "Amplitude",
+        "v"
+};
+const char* const cnrun::CN_VarSyms_OscillatorVdPol[] = {
+        "A",
+        "v"
+};
+const double cnrun::CN_Vars_OscillatorVdPol[] = {
+        0.1,       // amplitude
+        0.0        // internal var
+};
+
+
+//#endif // CN_WANT_MORE_NEURONS
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/hosted-neurons.hh b/upstream/src/libcnrun/hosted-neurons.hh
new file mode 100644
index 0000000..d77e2b5
--- /dev/null
+++ b/upstream/src/libcnrun/hosted-neurons.hh
@@ -0,0 +1,358 @@
+/*
+ *       File name:  libcnrun/hosted-neurons.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-10-11
+ *
+ *         Purpose:  hosted neuron classes (those having their
+ *                   state vars on parent model's integration vectors)
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_HOSTEDNEURONS_H_
+#define CNRUN_LIBCN_HOSTEDNEURONS_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <gsl/gsl_math.h>
+
+#include "forward-decls.hh"
+#include "base-neuron.hh"
+#include "hosted-attr.hh"
+
+namespace cnrun {
+
+enum class TIncludeOption { is_last, is_notlast, };
+
+class C_HostedNeuron
+  : public C_BaseNeuron, public C_HostedAttributes {
+
+        DELETE_DEFAULT_METHODS (C_HostedNeuron)
+
+    protected:
+        C_HostedNeuron (TUnitType intype, const string& inlabel,
+                        double x, double y, double z,
+                        CModel*, int s_mask,
+                        TIncludeOption include_option);
+    public:
+        void reset_vars();
+        double &var_value( size_t);
+        const double &get_var_value( size_t) const;
+};
+
+
+
+
+
+class C_HostedConductanceBasedNeuron
+  : public C_HostedNeuron {
+
+        DELETE_DEFAULT_METHODS (C_HostedConductanceBasedNeuron)
+
+    protected:
+        C_HostedConductanceBasedNeuron (TUnitType intype, const string& inlabel,
+                                        double inx, double iny, double inz,
+                                        CModel* inM, int s_mask,
+                                        TIncludeOption include_option)
+              : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
+                {}
+
+    public:
+        double  E() const; // needs access to parent model var vector, defined in model.hh
+        double  E( vector<double> &b) const  { return b[idx+0]; }
+        double& dE( vector<double> &b)       { return b[idx+0]; }
+
+        size_t n_spikes_in_last_dt() const;
+
+        void do_detect_spike_or_whatever();
+};
+
+
+
+
+
+// for completeness' sake -- no descendants yet
+class C_HostedRateBasedNeuron
+  : public C_HostedNeuron {
+
+        DELETE_DEFAULT_METHODS (C_HostedRateBasedNeuron)
+
+    protected:
+        C_HostedRateBasedNeuron (TUnitType intype, const string& inlabel,
+                                 double inx, double iny, double inz,
+                                 CModel* inM, int s_mask,
+                                 TIncludeOption include_option)
+              : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
+                {}
+
+    public:
+        size_t n_spikes_in_last_dt() const;
+};
+
+
+
+
+
+
+
+
+
+
+// Hodgkin-Huxley classic
+
+class CNeuronHH_d
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronHH_d)
+
+    public:
+        CNeuronHH_d (const string& inlabel,
+                     double x, double y, double z,
+                     CModel *inM, int s_mask = 0,
+                     TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron (NT_HH_D, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+      // parameters (since gcc 4.4, accessible from within member functions defined outside class definition, gee!)
+        enum {
+                gNa, ENa, gK,  EK, gl, El, Cmem,
+                alpha_m_a,        alpha_m_b,        alpha_m_c,        beta_m_a,        beta_m_b,        beta_m_c,
+                alpha_h_a,        alpha_h_b,        alpha_h_c,        beta_h_a,        beta_h_b,        beta_h_c,
+                alpha_n_a,        alpha_n_b,        alpha_n_c,        beta_n_a,        beta_n_b,        beta_n_c,
+                Idc,
+        };
+
+      // current state
+      // these wrappers mainly for code legibility in derivative(); otherwise, not used
+      // for reporting, CModel accesses vars as V[idx+n]
+        double   m( vector<double>& b) const { return b[idx+1]; }
+        double   h( vector<double>& b) const { return b[idx+2]; }
+        double   n( vector<double>& b) const { return b[idx+3]; }
+        double& dm( vector<double>& b)       { return b[idx+1]; }
+        double& dh( vector<double>& b)       { return b[idx+2]; }
+        double& dn( vector<double>& b)       { return b[idx+3]; }
+
+        void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
+};
+
+
+
+
+
+
+
+class CNeuronHH2_d
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronHH2_d)
+
+    public:
+        CNeuronHH2_d (const string& inlabel,
+                      double x, double y, double z,
+                      CModel *inM, int s_mask = 0,
+                      TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron( NT_HH2_D, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        double   m( vector<double>& b) const { return b[idx+1]; }
+        double   h( vector<double>& b) const { return b[idx+2]; }
+        double   n( vector<double>& b) const { return b[idx+3]; }
+        double& dm( vector<double>& b)       { return b[idx+1]; }
+        double& dh( vector<double>& b)       { return b[idx+2]; }
+        double& dn( vector<double>& b)       { return b[idx+3]; }
+
+        void derivative( vector<double>&, vector<double>&);
+};
+
+
+
+//#ifdef CN_WANT_MORE_NEURONS
+
+// Entorhinal cortex stellate cell
+
+class CNeuronEC_d
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronEC_d)
+
+    public:
+        CNeuronEC_d( const string& inlabel,
+                     double x, double y, double z,
+                     CModel *inM, int s_mask = 0,
+                     TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron (NT_EC_D, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        double    m   ( vector<double>& b) const { return b[idx+1]; }
+        double    h   ( vector<double>& b) const { return b[idx+2]; }
+        double    n   ( vector<double>& b) const { return b[idx+3]; }
+        double    Ih1 ( vector<double>& b) const { return b[idx+4]; }
+        double    Ih2 ( vector<double>& b) const { return b[idx+5]; }
+        double&   dm  ( vector<double>& b)       { return b[idx+1]; }
+        double&   dh  ( vector<double>& b)       { return b[idx+2]; }
+        double&   dn  ( vector<double>& b)       { return b[idx+3]; }
+        double& dIh1  ( vector<double>& b)       { return b[idx+4]; }
+        double& dIh2  ( vector<double>& b)       { return b[idx+5]; }
+
+
+        void derivative( vector<double>&, vector<double>&);
+};
+
+
+
+
+
+
+class CNeuronECA_d
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronECA_d)
+
+    public:
+        CNeuronECA_d( const string& inlabel,
+                      double x, double y, double z,
+                      CModel *inM, int s_mask = 0,
+                      TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron( NT_ECA_D, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        double      m( vector<double>& b) const { return b[idx+1]; }
+        double      h( vector<double>& b) const { return b[idx+2]; }
+        double      n( vector<double>& b) const { return b[idx+3]; }
+        double   mNap( vector<double>& b) const { return b[idx+4]; }
+        double    Ih1( vector<double>& b) const { return b[idx+5]; }
+        double    Ih2( vector<double>& b) const { return b[idx+6]; }
+
+        double&    dm( vector<double>& b)       { return b[idx+1]; }
+        double&    dh( vector<double>& b)       { return b[idx+2]; }
+        double&    dn( vector<double>& b)       { return b[idx+3]; }
+        double& dmNap( vector<double>& b)       { return b[idx+4]; }
+        double&  dIh1( vector<double>& b)       { return b[idx+5]; }
+        double&  dIh2( vector<double>& b)       { return b[idx+6]; }
+
+        void derivative( vector<double>&, vector<double>&);
+};
+
+//#endif  // CN_WANT_MORE_NEURONS
+
+
+
+
+
+
+
+
+//#ifdef CN_WANT_MORE_NEURONS
+
+class COscillatorColpitts
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (COscillatorColpitts)
+
+    public:
+        COscillatorColpitts( const string& inlabel,
+                             double x, double y, double z,
+                             CModel *inM, int s_mask = 0,
+                             TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron (NT_COLPITTS, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        double   x0( vector<double>& b) const { return b[idx+0]; }  // there's no E() for this one
+        double   x1( vector<double>& b) const { return b[idx+1]; }
+        double   x2( vector<double>& b) const { return b[idx+2]; }
+        double& dx0( vector<double>& b)       { return b[idx+0]; }
+        double& dx1( vector<double>& b)       { return b[idx+1]; }
+        double& dx2( vector<double>& b)       { return b[idx+2]; }
+
+        virtual void derivative( vector<double>&, vector<double>&);
+};
+
+
+
+
+
+
+
+/*
+// does not work
+
+class COscillatorLV
+  : public C_HostedConductanceBasedNeuron {
+
+    public:
+        double   fr( vector<double>& b) const        { return b[idx+1]; }
+        double& dfr( vector<double>& b)                { return b[idx+1]; }
+
+        COscillatorLV( const char *inlabel,
+                       double x, double y, double z,
+                       CModel *inM, int s_mask = 0,
+                       CModel::TIncludeOption include_option = true)
+              : C_HostedConductanceBasedNeuron( NT_LV, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        enum TParametersOscilLV {
+                rho
+        };
+        void derivative( vector<double>& x, vector<double>& dx)
+                {
+                        dE(dx) = fr(x) * (1.0 - P[rho] * fr(x)) - Isyn(x);
+                }
+};
+
+
+*/
+
+
+
+
+class COscillatorVdPol
+  : public C_HostedConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (COscillatorVdPol)
+
+     public:
+        COscillatorVdPol (const string& inlabel,
+                          double x, double y, double z,
+                          CModel *inM, int s_mask = 0,
+                          TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron (NT_VDPOL, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+                {}
+
+        double   amp( vector<double>& b) const  { return b[idx+0]; }
+        double    _x( vector<double>& b) const  { return b[idx+1]; }
+        double& damp( vector<double>& b)        { return b[idx+0]; }
+        double&  d_x( vector<double>& b)        { return b[idx+1]; }
+
+        enum TParametersOscilVdPol {
+                eta, omega2
+        };
+        void derivative( vector<double> &x, vector<double> &dx)
+                {
+                        damp(dx) = _x(x);
+                        d_x(dx) = (P[eta] - gsl_pow_2( amp(x))) * _x(x) - P[omega2] * amp(x) + Isyn(x);
+                }
+};
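+// The pair above integrates  A'' = (eta - A^2) A' - omega^2 A + Isyn,
+// i.e. a van der Pol oscillator driven by the synaptic current.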
+
+//#endif  // CN_WANT_MORE_NEURONS
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/hosted-synapses.cc b/upstream/src/libcnrun/hosted-synapses.cc
new file mode 100644
index 0000000..8a57542
--- /dev/null
+++ b/upstream/src/libcnrun/hosted-synapses.cc
@@ -0,0 +1,351 @@
+/*
+ *       File name:  libcnrun/hosted-synapses.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-04-03
+ *
+ *         Purpose:  hosted synapse classes (those having their
+ *                   state vars on parent model's integration vectors)
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <iostream>
+
+#include "hosted-synapses.hh"
+
+#include "types.hh"
+#include "model.hh"
+
+using namespace std;
+
+
+// the base synapse here
+cnrun::C_HostedSynapse::
+C_HostedSynapse (const TUnitType type_,
+                 C_BaseNeuron *source_, C_BaseNeuron *target_,
+                 const double g_, CModel *M_, int s_mask,
+                 TIncludeOption include_option)
+      : C_BaseSynapse (type_, source_, target_, g_, M_, s_mask),
+        C_HostedAttributes()
+{
+        if ( M )
+                M->include_unit( this, include_option);
+        else
+                idx = (unsigned long)-1;
+}
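+// ((unsigned long)-1 presumably serves as a "not placed on any model's
+// integration vector" sentinel for idx)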
+
+
+
+
+
+// -- parameters
+
+const char* const cnrun::CN_ParamNames_SynapseAB_dd[] = {
+//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
+        "Reversal potential Esyn, " CN_PU_POTENTIAL,
+        "Presyn threshold potential Epre, " CN_PU_POTENTIAL,
+        "Rise rate α, " CN_PU_RATE,
+        "Decay rate β, " CN_PU_RATE,
+        "Time of transmitter release, " CN_PU_TIME,
+//        "Noise level \317\203",
+};
+const char* const cnrun::CN_ParamSyms_SynapseAB_dd[] = {
+//        "gsyn",
+        "Esyn",
+        "Epre",
+        "alpha",
+        "beta",
+        "trel",
+//        "sigma",
+};
+
+const double cnrun::CN_Params_SynapseAB_dd[] = {
+//        0.12,
+        0,
+      -20,
+        0.5,
+        0.05,
+        5.0,
+//        0.
+};
+
+const double cnrun::CN_Params_SynapseABMinus_dd[] = {
+//        0.12,
+        0,
+      -20,
+        0.27785150819749,
+        0.05,
+        5.0,
+//        0.
+};
+
+const double cnrun::CN_Params_SynapseMxAB_dd[] = {
+//        0.12,
+        0,
+      -20,
+        0.27785150819749,  // the only parameter differing from its AB namesake;
+                           // by design, it is the same as in the ABMinus variation
+        0.05,
+        5.0,
+//        0.
+};
+
+
+const char* const cnrun::CN_ParamNames_SynapseAB_dr[] = {
+//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
+        "Assumed (target->E - Esyn), " CN_PU_POTENTIAL,
+        "Presyn threshold potential Epre, " CN_PU_POTENTIAL,
+        "Rise rate α, " CN_PU_RATE,
+        "Decay rate β, " CN_PU_RATE,
+        "Time of transmitter release, " CN_PU_TIME,
+//        "Noise level \317\203",
+};
+const char* const cnrun::CN_ParamSyms_SynapseAB_dr[] = {
+//        "gsyn",
+        "Ediff",
+        "Epre",
+        "alpha",
+        "beta",
+        "trel",
+//        "sigma",
+};
+
+
+const double cnrun::CN_Params_SynapseMxAB_dr[] = {
+//        0.12,
+      -60 - 0,  // Ediff: a reasonable target->E - Esyn, the former being -60 mV at rest
+      -20,
+        0.27785150819749,
+        0.05,
+        5.0,
+//        0.
+};
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_SynapseAB_rr[] = {
+//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
+        "Assumed (target->E - Esyn), " CN_PU_VOLTAGE,
+        "Rise rate α, " CN_PU_RATE,
+        "Decay rate β, " CN_PU_RATE,
+        "Refractory period T, " CN_PU_TIME,
+//        "Noise level \317\203",
+};
+const char* const cnrun::CN_ParamSyms_SynapseAB_rr[] = {
+//        "gsyn",
+        "Ediff",
+        "alpha",
+        "beta",
+        "T",
+//        "sigma",
+};
+const double cnrun::CN_Params_SynapseAB_rr[] = {
+//        0.12,
+      -60 - 0,
+        0.27785150819749,
+        0.05,
+        5,
+//        0.
+};
+
+
+
+const char* const cnrun::CN_ParamNames_SynapseRall_dd[] = {
+//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
+        "Reversal potential, " CN_PU_POTENTIAL,
+        "Presynaptic threshold potential, " CN_PU_POTENTIAL,
+        "τ, " CN_PU_RATE,
+//        "Noise level \317\203",
+};
+const char* const cnrun::CN_ParamSyms_SynapseRall_dd[] = {
+//        "gsyn",
+        "Esyn",
+        "Epre",
+        "tau",
+//        "sigma",
+};
+const double cnrun::CN_Params_SynapseRall_dd[] = {
+//        0.12,
+        0,
+      -20,
+        2,
+//        0.
+};
+
+
+
+
+// -- variables
+
+const char* const cnrun::CN_VarNames_SynapseAB[] = {
+        "Amount of neurotransmitter released S"
+};
+const char* const cnrun::CN_VarSyms_SynapseAB[] = {
+        "S"
+};
+const double cnrun::CN_Vars_SynapseAB[] = {
+        0.
+};
+
+
+const char* const cnrun::CN_VarNames_SynapseRall[] = {
+        "Amount of neurotransmitter released S",
+        "Amount of neurotransmitter absorbed R",
+};
+const char* const cnrun::CN_VarSyms_SynapseRall[] = {
+        "S",
+        "R",
+};
+const double cnrun::CN_Vars_SynapseRall[] = {
+        0.,
+        0.
+};
+
+
+
+
+
+
+
+void
+cnrun::CSynapseAB_dd::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        if ( x[0] - t_last_release_started <= P[_rtime_] ) {
+              // continue release from an old spike
+                dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
+        } else
+                if ( _source->E(x) > P[_Epre_] ) {
+                      // new spike ... start releasing
+                        t_last_release_started = x[0];
+                        dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
+                } else {
+                      // no release
+                        dS(dx) = -P[_beta_] * S(x);
+                }
+}
+
+
+
+
+void
+cnrun::CSynapseABMinus_dd::
+derivative( vector<double>& x, vector<double>& dx)
+{
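+      // Note: release below is driven by P[_alpha_] * 1, not
+      // P[_alpha_] * (1 - S(x)) as in CSynapseAB_dd; presumably the
+      // "Minus" in the class name refers to this dropped saturation factor.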
+        if ( x[0] - t_last_release_started <= P[_rtime_] ) {
+              // continue release from an old spike
+                dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
+        } else
+                if ( _source->E(x) > P[_Epre_] ) {
+                      // new spike ... start releasing
+                        t_last_release_started = x[0];
+                        dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
+                } else {
+                      // no release
+                        dS(dx) = -P[_beta_] * S(x);
+                }
+}
+
+
+
+
+// -------- Multiplexing AB
+
+void
+cnrun::CSynapseMxAB_dd::
+derivative( vector<double>& x, vector<double>& dx)
+{
+//        printf( "%s %lu %d %g\n", _source->label, _source->serial_id, _source->idx, _source->E(x));
+
+        if ( q() > 0 ) {
+                size_t effective_q = q();
+              // as we nudge along a little within RK's operational
+              // dt, some spikes can expire in that brief while:
+              // temporarily decrement q for this evaluation only
+                while ( effective_q  &&  M->model_time(x) - _kq[q()-effective_q] > P[_rtime_] )
+                        --effective_q;
+#ifdef CN_MORECODE__
+                if ( effective_q < q() )
+                        M->vp( 6, "YMxAB %s smacks %zu spike(s) of %zu at %g(+%g)\n", label,
+                               (size_t)q() - effective_q, (size_t)q(),
+                               M->model_time(),
+                               M->model_time(x) - M->model_time());
+#endif
+                dS(dx) = P[_alpha_] * effective_q - P[_beta_] * S(x);
+        } else
+              // no release, decay
+                dS(dx) = -P[_beta_] * S(x);
+}
+
+
+
+void
+cnrun::CSynapseMxAB_dd::
+update_queue()
+{
+        size_t k = _source -> n_spikes_in_last_dt();
+        while ( k-- )
+                _kq.push_back( model_time());
+
+      // see if the oldest spike has gone past synapse release time
+      // disregard spike duration, measure time from saved spike_start
+      // (which is == spike_end)
+        while ( true ) {
+                if ( q() > 0 && model_time() - _kq.front() > P[_rtime_] )
+                        _kq.erase( _kq.begin());
+                else
+                        break;
+        }
+}
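+// (_kq thus presumably holds the model_time() stamps of spikes still
+// within their release window, with q() reporting its length)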
+
+
+
+
+
+
+
+
+
+
+void
+cnrun::CSynapseAB_rr::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        // if ( source()->F(x) > 0 )
+        //         printf( "%s->F(x) = %g\n", _source->label, source()->F(x));
+        dS(dx) = -P[_beta_] * S(x)
+                + P[_alpha_] * _numerator / (exp( P[_beta_] / source()->F(x)) + 1);
+}
+
+
+
+
+
+
+
+inline int Heaviside( double val)  { return (val >= 0) ? 1 : 0; }
+
+void
+cnrun::CSynapseRall_dd::
+derivative( vector<double>& x, vector<double>& dx)
+{
+        dR(dx) = 1 / P[_tau_] * (-R(x) + Heaviside( _source->E(x) - P[_Epre_]));
+        dS(dx) = 1 / P[_tau_] * (-S(x) + R(x));
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/hosted-synapses.hh b/upstream/src/libcnrun/hosted-synapses.hh
new file mode 100644
index 0000000..db2cc59
--- /dev/null
+++ b/upstream/src/libcnrun/hosted-synapses.hh
@@ -0,0 +1,318 @@
+/*
+ *       File name:  libcnrun/hosted-synapses.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-04-01
+ *
+ *         Purpose:  hosted synapse classes (those having their
+ *                   state vars on parent model's integration vectors)
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_HOSTEDSYNAPSES_H_
+#define CNRUN_LIBCN_HOSTEDSYNAPSES_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <vector>
+#include <queue>
+#include <cfloat>
+
+#include "base-synapse.hh"
+#include "hosted-attr.hh"
+#include "mx-attr.hh"
+#include "hosted-neurons.hh"
+#include "standalone-neurons.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+class C_HostedSynapse
+  : public C_BaseSynapse, public C_HostedAttributes {
+
+        DELETE_DEFAULT_METHODS (C_HostedSynapse)
+
+    protected:
+        C_HostedSynapse (TUnitType type_,
+                         C_BaseNeuron *source_, C_BaseNeuron *target_,
+                         double g_, CModel*, int s_mask = 0,
+                         TIncludeOption = TIncludeOption::is_last);
+    public:
+        void reset_vars();
+        double &var_value( size_t);
+        const double &get_var_value( size_t) const;
+
+        double   S() const; // needs access to parent model var vector, defined in model.hh
+        double   S( vector<double> &b) const        { return b[idx+0]; }
+        double& dS( vector<double> &b) const        { return b[idx+0]; }
+};
+
+
+
+
+// Note on synapses classification per source/target being a tonic
+// (rate) or phasic (discrete) unit:
+//
+// * Where a synapse connects _to_ a Rate neuron, it will have Ediff
+//   in lieu of Esyn and compute Isyn accordingly, otherwise inheriting
+//   its parameters.
+//
+// * Where a synapse connects _from_ a Rate unit, its derivative
+//   method follows a completely different equation.  It now has a
+//   different set of parameters, too.
+
+// The suffix in a class name, _xy, means x = source, y = target, with
+// `d' for discrete, `r' for rate.
+
+
+// The `multiplexing' part has a relevance for the source of the
+// synapse, with magic to collect and multiplex more than a single
+// spike per dt.
+//
+// * The source is a specialized (`dot'), and inherently phasic, unit.
+// * All parameters are inherited from the base class.
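+//
+// Decoding a few of the concrete class names below per that scheme
+// (illustrative only):
+//   CSynapseAB_dd   -- alpha-beta kinetics, discrete -> discrete;
+//   CSynapseAB_rr   -- alpha-beta kinetics, rate -> rate;
+//   CSynapseMxAB_dr -- multiplexing alpha-beta, discrete -> rate.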
+
+
+// Alpha-Beta family
+
+class CSynapseAB_dd
+  : public C_HostedSynapse {
+
+        DELETE_DEFAULT_METHODS (CSynapseAB_dd)
+
+    public:
+        CSynapseAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                       double ing, CModel *inM, int s_mask = 0,
+                       TIncludeOption include_option = TIncludeOption::is_last,
+                       TUnitType alt_type = YT_AB_DD)
+              : C_HostedSynapse (alt_type, insource, intarget,
+                                 ing, inM, s_mask, include_option)
+                {}
+
+        enum {
+                _Esyn_, _Epre_, _alpha_, _beta_, _rtime_
+        };
+
+        double Isyn( const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
+                {
+                        return -g * S() * (with_neuron.E() - P[_Esyn_]);
+//                        return -P[_gsyn_] * S() * (_target->E() - P[_Esyn_]);
+                }
+        double Isyn( vector<double>& b, const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
+                {
+                        return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
+//                        return -P[_gsyn_] * S(b) * (_target->E(b) - P[_Esyn_]);
+                }
+
+        void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
+};
+
+
+class CNeuronHHRate;
+
+// TODO
+class CSynapseAB_dr;
+class CSynapseAB_rd;
+
+
+class CSynapseAB_rr
+  : public C_HostedSynapse {
+
+        DELETE_DEFAULT_METHODS (CSynapseAB_rr)
+
+    public:
+        CSynapseAB_rr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                       double ing, CModel *inM, int s_mask = 0,
+                       TIncludeOption include_option = TIncludeOption::is_last,
+                       TUnitType alt_type = YT_AB_RR)
+              : C_HostedSynapse( alt_type, insource, intarget,
+                                 ing, inM, s_mask, include_option)
+                {}
+
+        enum {
+                _Ediff_, _alpha_, _beta_, _T_, _sigma_
+        };
+
+      // supply own Isyn to avoid referencing target->E
+        double Isyn( const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S() * P[_Ediff_];
+                }
+        double Isyn( vector<double>& x, const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S(x) * P[_Ediff_];
+                }
+
+        void derivative( vector<double>&, vector<double>&);
+
+        void param_changed_hook()
+                {
+                        _numerator = exp( P[_beta_] * P[_T_]) + 1;
+                }
+    private:
+        double  _numerator;
+};
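+// Per its derivative() in hosted-synapses.cc, CSynapseAB_rr integrates
+//   dS/dt = -beta S + alpha (exp(beta T) + 1) / (exp(beta / F_src) + 1),
+// where F_src is the source unit's rate (source()->F()) and the numerator
+// is the value precomputed in param_changed_hook() above.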
+
+
+
+
+
+
+
+
+class CSynapseMxAB_dd
+  : public CSynapseAB_dd, public C_MultiplexingAttributes {
+
+        DELETE_DEFAULT_METHODS (CSynapseMxAB_dd)
+
+    public:
+        CSynapseMxAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                         double ing, CModel *inM, int s_mask = 0,
+                         TIncludeOption include_option = TIncludeOption::is_last,
+                         TUnitType alt_type = YT_MXAB_DD)
+              : CSynapseAB_dd (insource, intarget,
+                               ing, inM, s_mask, include_option,
+                               alt_type)
+                {}
+
+        void reset_state()
+                {
+                        C_HostedSynapse::reset_state();
+                        C_MultiplexingAttributes::reset();
+                }
+
+      // because an Mx* synapse's source is always a standalone, non-integratable neuron,
+      // which doesn't propagate its vars onto M->V, we fold S(x) to make the actual S value
+      // available from within the integrator
+        double S() const                        { return C_HostedSynapse::S(); }
+        double S( vector<double> &unused) const { return C_HostedSynapse::S(); }
+
+        void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
+
+    private:
+        friend class CModel;
+        void update_queue();
+};
+
+
+
+
+
+class CSynapseMxAB_dr
+  : public CSynapseMxAB_dd {
+
+        DELETE_DEFAULT_METHODS (CSynapseMxAB_dr)
+
+    public:
+        CSynapseMxAB_dr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                         double ing, CModel *inM, int s_mask = 0,
+                         TIncludeOption include_option = TIncludeOption::is_last)
+              : CSynapseMxAB_dd (insource, intarget,
+                                 ing, inM, s_mask, include_option,
+                                 YT_MXAB_DR)
+                {}
+
+        enum { _Ediff_, /* ... */ };
+        double Isyn( const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S() * P[_Ediff_];
+                }
+        double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S() * P[_Ediff_];
+                }
+};
+
+
+
+
+
+
+
+
+
+class CSynapseABMinus_dd
+  : public CSynapseAB_dd {
+
+        DELETE_DEFAULT_METHODS (CSynapseABMinus_dd)
+
+    public:
+        CSynapseABMinus_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                            double ing, CModel *inM, int s_mask = 0,
+                            TIncludeOption include_option = TIncludeOption::is_last)
+              : CSynapseAB_dd (insource, intarget,
+                               ing, inM, s_mask, include_option,
+                               YT_ABMINUS_DD)
+                {}
+
+        enum {
+                _Esyn_, _Epre_, _alpha_, _beta_, _rtime_, _sigma_
+        };
+
+        void derivative( vector<double>&, vector<double>&);
+};
+
+
+// TODO
+class CSynapseABMinus_dr;
+class CSynapseABMinus_rd;
+class CSynapseABMinus_rr;
+
+
+
+
+// Rall
+
+class CSynapseRall_dd
+  : public C_HostedSynapse {
+
+        DELETE_DEFAULT_METHODS (CSynapseRall_dd)
+
+    public:
+        CSynapseRall_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                         double ing, CModel *inM, int s_mask = 0,
+                         TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedSynapse (YT_RALL_DD, insource, intarget,
+                                 ing, inM, s_mask, include_option)
+                {}
+
+        double&  R( vector<double>& b)        { return b[idx+1]; }
+        double& dR( vector<double>& b)        { return b[idx+1]; }
+
+        enum {
+                _Esyn_, _Epre_, _tau_, _sigma_
+        };
+
+        double Isyn( const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S() * (with_neuron.E() - P[_Esyn_]);
+                }
+        double Isyn( vector<double>&b, const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
+                }
+
+        void derivative( vector<double>&, vector<double>&);
+};
+
+// TODO
+class CSynapseRall_rd;
+class CSynapseRall_dr;
+class CSynapseRall_rr;
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/integrate-base.hh b/upstream/src/libcnrun/integrate-base.hh
new file mode 100644
index 0000000..e4d6399
--- /dev/null
+++ b/upstream/src/libcnrun/integrate-base.hh
@@ -0,0 +1,64 @@
+/*
+ *       File name:  libcnrun/integrate-base.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2008-09-23
+ *
+ *         Purpose:  base class for integrators, to be plugged into CModel.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_INTEGRATE_BASE_H_
+#define CNRUN_LIBCN_INTEGRATE_BASE_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
+
+
+namespace cnrun {
+
+class CIntegrate_base {
+
+        DELETE_DEFAULT_METHODS (CIntegrate_base)
+
+    public:
+        double  _dt_min, _dt_max, _dt_cap,
+                _eps, _eps_abs, _eps_rel,
+                dt;  // that which is current
+
+        bool    is_owned;
+
+        CModel *model;
+
+        CIntegrate_base (const double& dt_min, const double& dt_max, const double& dt_cap,
+                         const double& eps, const double& eps_abs, const double& eps_rel,
+                         bool inis_owned)
+              : _dt_min (dt_min), _dt_max (dt_max), _dt_cap (dt_cap),
+                _eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
+                dt (dt_min),
+                is_owned (inis_owned)
+                {}
+        virtual ~CIntegrate_base()
+                {}
+
+        virtual void cycle() = 0;
+        virtual void fixate() = 0;
+        virtual void prepare() = 0;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/integrate-rk65.hh b/upstream/src/libcnrun/integrate-rk65.hh
new file mode 100644
index 0000000..ae1d033
--- /dev/null
+++ b/upstream/src/libcnrun/integrate-rk65.hh
@@ -0,0 +1,59 @@
+/*
+ *       File name:  libcnrun/integrate-rk65.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2008-09-23
+ *
+ *         Purpose:  A Runge-Kutta 6-5 integrator.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_INTEGRATERK65_H_
+#define CNRUN_LIBCN_INTEGRATERK65_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <vector>
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
+#include "integrate-base.hh"
+
+using namespace std;
+
+namespace cnrun {
+
+class CIntegrateRK65
+  : public CIntegrate_base {
+
+        DELETE_DEFAULT_METHODS (CIntegrateRK65)
+
+    public:
+        CIntegrateRK65 (double dt_min_ = 1e-6, double dt_max_ = .5, double dt_cap_ = 5,
+                        double eps_ = 1e-8,  double eps_abs_ = 1e-12, double eps_rel_ = 1e-6,
+                        bool is_owned_ = true)
+              : CIntegrate_base (dt_min_, dt_max_, dt_cap_,
+                                 eps_, eps_abs_, eps_rel_, is_owned_)
+                {}
+
+        void cycle() __attribute__ ((hot));
+        void fixate() __attribute__ ((hot));
+        void prepare();
+
+    private:
+        vector<double> Y[9], F[9], y5;
+};
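+// A minimal usage sketch (hypothetical wiring -- the_model and running are
+// placeholders; in cnrun proper, CModel is expected to own the integrator,
+// set its back-pointer and drive it):
+//
+//   CIntegrateRK65 *rk = new CIntegrateRK65();  // defaults as declared above
+//   rk->model = the_model;
+//   rk->prepare();         // size Y[], F[], y5 to model->_var_cnt
+//   while ( running ) {
+//           rk->cycle();   // one adaptive RK6(5) step into model->W
+//           rk->fixate();  // commit the accepted state
+//   }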
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model-cycle.cc b/upstream/src/libcnrun/model-cycle.cc
new file mode 100644
index 0000000..5610f48
--- /dev/null
+++ b/upstream/src/libcnrun/model-cycle.cc
@@ -0,0 +1,561 @@
+/*
+ *       File name:  libcnrun/model-cycle.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  CModel top cycle
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <ctime>
+#include <cstdlib>
+#include <cfloat>
+#include <cmath>
+#include <iostream>
+
+#include "libstilton/lang.hh"
+#include "integrate-rk65.hh"
+#include "model.hh"
+
+
+using namespace std;
+
+
+/*--------------------------------------------------------------------------
+  Implementation of a 6-5 Runge-Kutta method with adaptive time step,
+  mostly taken from the book "The Numerical Analysis of Ordinary Differential
+  Equations: Runge-Kutta and General Linear Methods" by J.C. Butcher, Wiley,
+  Chichester, 1987, freely adapted to a 6th-order Runge-Kutta method
+  for an ODE system with additive white noise
+--------------------------------------------------------------------------*/
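+
+// In the notation of cycle() below, with V the current state vector and
+// F[i] the stage derivatives evaluated at Y[i] (a restatement of the code,
+// not a derivation):
+//
+//   Y[i] = V + dt * sum_{j<i} __Butchers_a[i][j] * F[j]
+//   y5   = V + dt * sum_{j<8} __Butchers_a[8][j] * F[j]   (5th-order estimate)
+//   W    = V + dt * sum_{j<9} __Butchers_b[j]    * F[j]   (6th-order estimate)
+//
+// |W - y5| per variable then drives the adaptive choice of the next dt.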
+
+inline namespace {
+
+double __Butchers_a[9][8] = {
+        { },
+        { 1./9 },
+        { .5/9,        .5/9 },
+        { 0.416666666666667e-1,        0., 0.125 },
+        { 1./6, 0., -0.5, 2./3 },
+        { 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
+        { -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
+        { 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
+        { 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
+};
+
+
+double __Butchers_b[9] = {
+        0.567119155354449e-1,
+        0.,
+        0.,
+        0.210909572355356,
+        0.141490384615385,
+        0.202051282051282,
+        0.253186813186813,
+        0.843679809736684e-1,
+        0.512820512820513e-1
+};
+} // inline namespace
+
+
+
+void
+cnrun::CIntegrateRK65::
+prepare()
+{
+        for ( unsigned short i = 0; i < 9; ++i ) {
+                Y[i].resize( model->_var_cnt);
+                F[i].resize( model->_var_cnt);
+        }
+        y5.resize( model->_var_cnt);
+
+        if ( model->n_standalone_units() > 0 )
+                if ( _dt_max > model->_discrete_dt ) {
+                        _dt_max = model->_discrete_dt;
+                        model->vp( 1, "CIntegrateRK65: Set dt_max to model->discrete_dt: %g\n", _dt_max);
+                }
+}
+
+
+void
+__attribute__ ((hot))
+cnrun::CIntegrateRK65::
+cycle()
+{
+      // omp stuff found inapplicable due to considerable overhead in sys time
+      // (thread creation)
+        unsigned int i, j, k;
+
+        double  aF;
+
+      // calculate iterative terms rk65_Y[__i] and rk65_F[__i] (to sixth order)
+        for ( i = 0; i < 9; ++i ) {
+//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) firstprivate(aF,j,i)
+                for ( k = 0; k < model->_var_cnt; ++k ) {
+                        aF = 0.0;
+                        for ( j = 0; j < i; ++j )
+                                aF += __Butchers_a[i][j] * F[j][k];
+                        Y[i][k] = model->V[k] + dt * aF;
+                }
+              // see to this vector's dt: V[0] is model time, so its derivative is 1
+                F[i][0] = 1.;
+
+//#pragma omp consider...
+                for ( auto& N : model->hosted_neurons )
+                        N -> derivative( Y[i], F[i]);
+                for ( auto& S : model->hosted_synapses )
+                        S -> derivative( Y[i], F[i]);
+        }
+
+      // sum up Y[i] and F[i] to build 5th order scheme -> y5
+//#pragma omp parallel for private(aF,j)
+        for ( k = 0; k < model->_var_cnt; ++k ) {
+                aF = 0.0;
+                for ( j = 0; j < 8; ++j )
+                        aF += __Butchers_a[8][j] * F[j][k];
+                y5[k] = model->V[k] + dt * aF;
+        }
+
+      // sum up Y[i] and F[i] to build 6th order scheme -> W
+//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) private(aF,j)
+        for ( k = 0; k < model->_var_cnt; ++k ) {
+                aF = 0.0;
+                for ( j = 0; j < 9; ++j )
+                        aF += __Butchers_b[j] * F[j][k];
+                model->W[k] = model->V[k] + dt * aF;
+        }
+
+      // kinkiness in synapses causes dt to rocket
+        double  dtx = min( _dt_max, dt * _dt_cap);
+
+      // determine minimal necessary new dt to get error < eps based on the
+      // difference between results in y5 and W
+        double try_eps, delta, try_dt;
+      // exclude time (at index 0)
+//#pragma omp parallel for private(try_eps,delta,try_dt)
+        for ( k = 1; k < model->_var_cnt; ++k ) {
+                try_eps = max( _eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
+                delta = abs( model->W[k] - y5[k]);
+                if ( delta > DBL_EPSILON * y5[k] ) {
+                        try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
+                        if ( try_dt < dtx )
+                                dtx = try_dt;
+                }
+        }
+      // make sure we don't grind to a halt
+        if ( dtx < _dt_min )
+                dtx = _dt_min;
+
+      // set the new step
+        dt = dtx;
+}
+
+
+
+
+
+
+
+
+// -------------- CModel::advance and dependents
+
+unsigned int
+cnrun::CModel::
+advance( const double dist, double * const cpu_time_used_p)
+{
+        if ( units.size() == 0 ) {
+                vp( 1, "Model is empty\n");
+                return 0;
+        }
+        if ( is_ready )
+                prepare_advance();
+
+        bool    have_hosted_units = !!n_hosted_units(),
+                have_standalone_units = !!n_standalone_units(),
+                have_ddtbound_units = !!n_ddtbound_units();
+
+        if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
+                return _do_advance_on_pure_hosted( dist, cpu_time_used_p);
+        if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
+                return _do_advance_on_pure_standalone( dist, cpu_time_used_p);
+        if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
+                return _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
+
+        unsigned int retval = _do_advance_on_mixed( dist, cpu_time_used_p);
+        return retval;
+}
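+
+// A minimal driving sketch (hypothetical caller; CModel construction as in
+// model-struct.cc and import as in model-nmlio.cc, both below):
+//
+//         CModel model ("demo", new CIntegrateRK65 (1e-6 /* dt_min */),
+//                       SModelOptions());
+//         model.import_NetworkML( "demo.nml", TNMLImportOption::reset);
+//         double cpu_seconds;
+//         unsigned steps = model.advance( 100. /* msec */, &cpu_seconds);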
+
+void
+__attribute__ ((hot))
+cnrun::CModel::
+_setup_schedulers()
+{
+        regular_periods.clear();
+        regular_periods_last_checked.clear();
+        if ( units_with_periodic_sources.size() ) { // determine period(s) at which to wake up reader update loop
+                for ( auto& U : units_with_periodic_sources )
+                        for ( auto& S : U -> _sources )
+                                regular_periods.push_back(
+                                        (reinterpret_cast<CSourcePeriodic*>(S.source)) -> period());
+                regular_periods.sort();
+                regular_periods.unique();  // list::unique only drops adjacent duplicates, hence sort first
+                regular_periods_last_checked.resize( regular_periods.size());
+        }
+
+        if ( regular_periods.size() > 0 )
+                vp( 2, "%zd timepoint(s) in scheduler_update_periods: %s\n\n",
+                    regular_periods.size(),
+                    stilton::str::join( regular_periods).c_str());
+
+      // ensure all schedulers are effective at the beginning, too
+        for ( auto& U : units_with_periodic_sources )
+                U->apprise_from_sources();
+}
+
+
+void
+cnrun::CModel::
+prepare_advance()
+{
+        if ( options.log_dt && !_dt_logger )
+                _dt_logger = new ofstream( string(name + ".dt").c_str());
+        if ( options.log_spikers && !_spike_logger )
+                _spike_logger = new ofstream( string(name + ".spikes").c_str());
+
+        _setup_schedulers();
+
+        if ( !n_hosted_units() )
+                _integrator->dt = _discrete_dt;
+
+        is_ready = true;
+
+        vp( 5, stderr, "Model prepared\n");
+}
+
+
+
+// A comment concerning the for_all_conscious_neurons loop:
+// conscious neurons have no next_time_E or suchlike, and their `fixate' is
+// implicit herein; also, they fire irrespective of whatever happens elsewhere
+// in the model and, logically, have no inputs.
+
+#define _DO_ADVANCE_COMMON_INLOOP_BEGIN \
+        make_units_with_continuous_sources_apprise_from_sources();      \
+        {                                                               \
+                auto I = regular_periods.begin();                       \
+                auto Ic = regular_periods_last_checked.begin();         \
+                for ( ; I != regular_periods.end(); ++I, ++Ic )         \
+                        if ( unlikely (model_time() >= *I * (*Ic + 1)) ) { \
+                                (*Ic)++;                                \
+                                make_units_with_periodic_sources_apprise_from_sources(); \
+                        }                                               \
+        }                                                               \
+        make_conscious_neurons_possibly_fire();                         \
+                                                                        \
+        for ( auto& Y : multiplexing_synapses ) \
+                if ( Y->_source )                                        \
+                        Y->update_queue();
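+
+// E.g., with a single periodic source of period 10 (msec), the block above
+// apprises its unit as model_time() crosses 10, 20, 30, ...; *Ic counts how
+// many periods have already been serviced.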
+
+
+#define _DO_ADVANCE_COMMON_INLOOP_MID \
+        if ( have_listeners ) {                                         \
+                if ( have_discrete_listen_dt ) {                        \
+                        if ( model_time() - last_made_listen >= options.listen_dt ) { \
+                                make_listening_units_tell();            \
+                                last_made_listen += options.listen_dt;  \
+                        }                                               \
+                } else                                                  \
+                        make_listening_units_tell();                    \
+        }                                                               \
+        if ( unlikely (options.log_dt) )                                \
+                (*_dt_logger) << model_time() << "\t" << dt() << endl;  \
+                                                                        \
+        for ( auto& N : spikelogging_neurons ) {                        \
+                N -> do_detect_spike_or_whatever();                     \
+                if ( !is_diskless &&                                    \
+                     N->n_spikes_in_last_dt() &&                        \
+                     options.log_spikers ) {                            \
+                        (*_spike_logger) << model_time() << "\t";       \
+                        if ( options.log_spikers_use_serial_id )        \
+                                (*_spike_logger) << N->_serial_id << endl; \
+                        else                                            \
+                                (*_spike_logger) << N->_label << endl;  \
+                }                                                       \
+        }
+
+
+#define _DO_ADVANCE_COMMON_INLOOP_END \
+        ++_cycle;                                                       \
+        ++steps;                                                        \
+        if ( options.verbosely != 0 ) {                                 \
+                if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
+                        cpu_time_lastchecked = clock();                 \
+                        if ( options.display_progress_percent && !options.display_progress_time ) \
+                                fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*8 : 0, \
+                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+                        else if ( options.display_progress_time && !options.display_progress_percent ) \
+                                fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*16 : 0, \
+                                         model_time());                 \
+                        else if ( options.display_progress_percent && options.display_progress_time ) \
+                                fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*24 : 0, \
+                                         model_time(),                  \
+                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+                        fflush( stderr);                                \
+                }                                                       \
+        }
+
+
+#define _DO_ADVANCE_COMMON_EPILOG \
+        make_spikeloggers_sync_history();                               \
+        cpu_time_ended = clock();                                        \
+        double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
+        if ( cpu_time_used_p )                                                \
+                *cpu_time_used_p = cpu_time_taken_seconds;                \
+        if ( options.verbosely > 0 || options.verbosely <= -1 ) {                        \
+                fprintf( stderr, "\r\033[K");                                \
+                fflush( stderr);                                        \
+        }                                                                \
+        vp( 0, "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:"   \
+            " avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n",     \
+            model_time(), dist, steps, cpu_time_taken_seconds,          \
+            model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
+
+
+
+
+
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_hosted( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = (listening_units.size() > 0),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+                _integrator->cycle();
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                _integrator->fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+              // model_time is advanced implicitly in _integrator->cycle()
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_standalone( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = !!listening_units.size(),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+              // service simple units w/out any vars on the integration vector V
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : standalone_synapses )
+                        Y -> preadvance();
+
+              // even in the case of n_hosted_{neurons,units}() == 0, we would need
+              // _integrator->cycle() to advance V[0], which is our model_time(); that
+              // is kind of expensive, so here's a shortcut
+                V[0] += _discrete_dt;
+                // _discrete_time += _discrete_dt;  // not necessary
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : standalone_synapses )
+                        Y -> fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+
+
+
+
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_ddtbound( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = (listening_units.size() > 0),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+              // lastly, service units only serviceable at discrete dt
+                for ( auto& N : ddtbound_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : ddtbound_synapses )
+                        Y -> preadvance();
+
+                V[0] += _discrete_dt;
+                _discrete_time += _discrete_dt;
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                for ( auto& N : ddtbound_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : ddtbound_synapses )
+                        Y -> fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+
+
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_mixed( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_hosted_units = !!n_hosted_units(),
+                have_listeners = !!listening_units.size(),
+                have_discrete_listen_dt = (options.listen_dt > 0.),
+                need_fixate_ddtbound_units;
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+                _integrator->cycle();
+
+              // service simple units w/out any vars on the integration vector V
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : standalone_synapses )
+                        Y -> preadvance();
+
+              // lastly, service units only serviceable at discrete dt
+                if ( this->have_ddtb_units && model_time() >= _discrete_time ) {
+                        for ( auto& N : ddtbound_neurons )
+                                if ( !N->is_conscious() )
+                                        N -> preadvance();
+                        for ( auto& Y : ddtbound_synapses )
+                                Y -> preadvance();
+
+                        _discrete_time += _discrete_dt;
+                        need_fixate_ddtbound_units = true;
+                } else
+                        need_fixate_ddtbound_units = false;
+
+                if ( !have_hosted_units )
+                        V[0] += _discrete_dt;
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                _integrator->fixate();
+
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : standalone_synapses )
+                        Y -> fixate();
+
+                if ( need_fixate_ddtbound_units ) {
+                        for ( auto& N : ddtbound_neurons )
+                                if ( !N->is_conscious() )
+                                        N -> fixate();
+                        for ( auto& Y : ddtbound_synapses )
+                                Y -> fixate();
+                }
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model-nmlio.cc b/upstream/src/libcnrun/model-nmlio.cc
new file mode 100644
index 0000000..2286c7b
--- /dev/null
+++ b/upstream/src/libcnrun/model-nmlio.cc
@@ -0,0 +1,495 @@
+/*
+ *       File name:  libcnrun/model-nmlio.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2008-09-02
+ *
+ *         Purpose:  NeuroML import/export.
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <string>
+#include <iostream>
+#include <regex.h>
+
+#include "forward-decls.hh"
+#include "model.hh"
+
+
+using namespace std;
+
+#ifdef LIBXML_READER_ENABLED
+
+
+int
+cnrun::CModel::
+import_NetworkML( const string& fname, TNMLImportOption import_option)
+{
+        // LIBXML_TEST_VERSION;
+
+        xmlDoc *doc = xmlReadFile( fname.c_str(), nullptr, 0);
+        if ( !doc )
+                return TNMLIOResult::nofile;
+
+        int retval = import_NetworkML( doc, fname, import_option);
+
+        xmlFreeDoc( doc);
+
+        return retval;
+}
+
+
+
+
+
+inline namespace {
+
+xmlNode*
+find_named_root_child_elem( xmlNode *node,     // node to start search from
+                            const char *elem)  // name of the element searched for
+{
+        xmlNode *n;
+        for ( n = node->children; n; n = n->next ) {
+                if ( n->type == XML_ELEMENT_NODE ) {
+                        if ( xmlStrEqual( n->name, BAD_CAST elem) )
+                                return n;
+// the <populations> and <projections> nodes are expected to appear as
+// direct children of the root node; so don't go search deeper than that
+
+//                        if ( n->children ) { // go search deeper
+//                                ni = find_named_elem( n->children, elem);
+//                                if ( ni )
+//                                        return ni;
+//                        }
+                }
+        }
+        return nullptr;
+}
+
+}
+
+int
+cnrun::CModel::
+import_NetworkML( xmlDoc *doc, const string& fname,
+                  TNMLImportOption import_option)
+{
+        int retval = 0;
+
+        // we pass up on validation (for which we would need to keep a
+        // .dtd or Schema at hand), and proceed to extracting elements
+
+        xmlNode *root_node = xmlDocGetRootElement( doc),
+                *n;
+
+      // read meta:notes and make out a name for the model
+        if ( !root_node ) {
+                vp( 0, stderr, "import_NetworkML(\"%s\"): No root element\n", fname.c_str());
+                retval = TNMLIOResult::noelem;
+                goto out;
+        }
+
+      // give it a name: assume it's generated by neuroConstruct for now
+        if ( import_option == TNMLImportOption::reset ) {
+                reset();
+                if ( !(n = find_named_root_child_elem( root_node, "notes")) ) {
+                        vp( 1, stderr, "<notes> element not found; model will be unnamed\n");
+                        // this is not critical, so just keep the user
+                        // informed and proceed
+                } else
+                        if ( n->type == XML_ELEMENT_NODE ) {  // only concern ourselves with nodes of this type
+                                xmlChar *notes_s = xmlNodeGetContent( n);
+                                // look for a substring specific to neuroConstruct, which is obviously speculative
+                                regex_t RE;
+                                regcomp( &RE, ".*project: (\\w*).*", REG_EXTENDED);
+                                regmatch_t M[1+1];
+                                name = (0 == regexec( &RE, (char*)notes_s, 1+1, M, 0))
+                                        ? string ((char*)notes_s + M[1].rm_so, M[1].rm_eo - M[1].rm_so)
+                                        : "(unnamed)";
+                                regfree( &RE);  // release the compiled pattern
+                                xmlFree( notes_s);
+                        } else
+                                name = "(unnamed)";
+        }
+
+        vp( 1, "Model \"%s\": %s topology from %s\n",
+            name.c_str(),
+            (import_option == TNMLImportOption::merge) ?"Merging" :"Importing",
+            fname.c_str());
+
+        // In the following calls to _process_{populations,instances}
+        // functions, the actual order of appearance of these nodes in
+        // the xml file doesn't matter, thanks to the xml contents
+        // being already wholly read and available to us as a tree.
+
+      // process <populations>
+        if ( !(n = find_named_root_child_elem( root_node, "populations")) ) {
+                retval = TNMLIOResult::noelem;
+                goto out;
+        } // assume there is only one <populations> element: don't loop to catch more
+        if ( (retval = _process_populations( n->children)) < 0)        // note n->children, which is in fact a pointer to the first child
+                goto out;
+
+      // process <projections>
+      // don't strictly require any projections as long as there are some neurons
+        if ( (n = find_named_root_child_elem( root_node, "projections")) ) {
+                if ( (retval = _process_projections( n->children)) < 0 )
+                        goto out;
+        } else
+                vp( 2, "No projections found\n");
+
+out:
+        // we are done with topology; now put units' variables on a vector
+        finalize_additions();
+        // can call time_step only after finalize_additions
+
+        return retval;
+}
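+
+// For orientation, the NetworkML subset this importer reads has,
+// schematically, the following shape (element and attribute names as parsed
+// in the _process_* functions below; values are made up, namespaces elided):
+//
+//   <... root element; its name is not checked ...>
+//     <notes>neuroConstruct project: MyProject</notes>
+//     <populations>
+//       <population name="grpA" cell_type="...">
+//         <instances>
+//           <instance id="0"><location x="0" y="0" z="0"/></instance>
+//         </instances>
+//       </population>
+//     </populations>
+//     <projections>
+//       <projection name="prj1" source="grpA" target="grpA">
+//         <synapse_props synapse_type="..."/>
+//         <connections>
+//           <connection pre_cell_id="0" post_cell_id="1" weight="0.5"/>
+//         </connections>
+//       </projection>
+//     </projections>
+//   </...>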
+
+
+
+
+
+int
+cnrun::CModel::
+_process_populations( xmlNode *n)
+{
+        xmlChar *group_id_s = nullptr,
+                *cell_type_s = nullptr;
+
+        int     pop_cnt = 0;
+
+        try {
+                for ( ; n; n = n->next ) {  // if n is nullptr (parent had no children), we won't do a single loop
+                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "population") )
+                                continue;
+
+                        group_id_s = xmlGetProp( n, BAD_CAST "name");
+                        // BAD_CAST is just a cast to xmlChar*
+                        // with a catch that libxml functions
+                        // expect strings pointed to to be good UTF
+                        if ( !group_id_s ) {
+                                vp( 0, stderr, "<population> element missing a \"name\" attribute near line %d\n", n->line);
+                                return TNMLIOResult::badattr;
+                        }
+                      // probably an unnamed population isn't an error serious enough to abort the
+                      // operation, but discipline is above all
+
+                        cell_type_s = xmlGetProp( n, BAD_CAST "cell_type");
+                        // now we know the type of cells included in this population; remember it to pass on to
+                        // _process_population_instances, where it is used to select an appropriate unit type
+                        // when actually adding a neuron to the model
+
+                      // but well, let's check if we have units of that species in stock
+                        if ( !unit_species_is_neuron((char*)cell_type_s) && !unit_family_is_neuron((char*)cell_type_s) ) {
+                                vp( 0, stderr, "Bad cell species or family (\"%s\") in population \"%s\"\n",
+                                    (char*)cell_type_s, group_id_s);
+                                throw TNMLIOResult::badcelltype;
+                        }
+
+                        xmlNode *nin = n->children;  // again, ->children means ->first
+                        if ( nin )
+                                for ( ; nin; nin = nin->next )  // deal with multiple <instances> nodes
+                                        if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "instances") ) {
+                                                int subretval = _process_population_instances(
+                                                        nin->children,
+                                                        group_id_s, cell_type_s);
+                                                if ( subretval < 0 )
+                                                        throw subretval;
+
+                                                vp( 2, " %5d instance(s) of type \"%s\" in population \"%s\"\n",
+                                                    subretval, cell_type_s,  group_id_s);
+                                                ++pop_cnt;
+                                        }
+
+                        xmlFree( cell_type_s), xmlFree( group_id_s);
+                }
+
+                vp( 1, "\tTotal %d population(s)\n", pop_cnt);
+
+        } catch (int ex) {
+                xmlFree( cell_type_s), xmlFree( group_id_s);
+
+                return ex;
+        }
+
+        return pop_cnt;
+}
+
+
+
+
+
+
+int
+cnrun::CModel::
+_process_projections( xmlNode *n)
+{
+        // much the same code as in _process_populations
+
+        xmlChar *prj_name_s = nullptr,
+                *prj_src_s = nullptr,
+                *prj_tgt_s = nullptr,
+                *synapse_type_s = nullptr;
+
+        size_t pop_cnt = 0;
+
+        try {
+                for ( ; n; n = n->next ) {
+                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "projection") )
+                                continue;
+
+                        prj_name_s = xmlGetProp( n, BAD_CAST "name");
+                        if ( !prj_name_s ) {
+                                fprintf( stderr, "<projection> element missing a \"name\" attribute near line %u\n", n->line);
+                                return TNMLIOResult::badattr;
+                        }
+
+                        prj_src_s  = xmlGetProp( n, BAD_CAST "source");
+                        prj_tgt_s  = xmlGetProp( n, BAD_CAST "target");
+                        if ( !prj_src_s || !prj_tgt_s ) {
+                                fprintf( stderr, "Projection \"%s\" missing a \"source\" and/or \"target\" attribute near line %u\n",
+                                         prj_name_s, n->line);
+                                throw TNMLIOResult::badattr;
+                        }
+
+                        xmlNode *nin;
+                        nin = n->children;
+                        if ( !nin )
+                                fprintf( stderr, "Empty <projection> node near line %d\n", n->line);
+
+                        for ( ; nin; nin = nin->next )
+                                if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "synapse_props") ) {
+                                        synapse_type_s = xmlGetProp( nin, BAD_CAST "synapse_type");
+                                        if ( !unit_species_is_synapse( (char*)synapse_type_s) &&
+                                             !unit_family_is_synapse( (char*)synapse_type_s) ) {
+                                                fprintf( stderr, "Bad synapse type \"%s\" near line %u\n",
+                                                         (char*)synapse_type_s, nin->line);
+                                                throw TNMLIOResult::badcelltype;
+                                        }
+                                }
+
+                        for ( nin = n->children; nin; nin = nin->next )
+                                if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "connections") ) {
+                                        int subretval = _process_projection_connections(
+                                                nin->children,
+                                                prj_name_s, synapse_type_s,
+                                                prj_src_s, prj_tgt_s);
+                                        if ( subretval < 0 )
+                                                throw subretval;
+
+                                        vp( 2, " %5d connection(s) of type \"%s\" in projection \"%s\"\n",
+                                            subretval, synapse_type_s,  prj_name_s);
+                                        ++pop_cnt;
+                                }
+                        xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
+                }
+
+                vp( 1, "\tTotal %zd projection(s)\n", pop_cnt);
+
+        } catch (int ex) {
+                xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
+                return ex;
+        }
+
+        return (int)pop_cnt;
+}
+
+
+
+
+
+
+
+int
+cnrun::CModel::
+_process_population_instances(
+        xmlNode *n,
+        const xmlChar *group_prefix,
+        const xmlChar *type_s)
+{
+        int     retval = 0;  // also keeps a count of added neurons
+
+        double  x, y, z;
+        char    cell_id[C_BaseUnit::max_label_size];
+
+        xmlNode *nin;
+
+        xmlChar *id_s = nullptr;
+        try {
+                for ( ; n; n = n->next ) {
+                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "instance") )
+                                continue;
+
+                        id_s = xmlGetProp( n, BAD_CAST "id");  // assign the outer id_s, so the catch handler can free it
+                        if ( !id_s ) {
+                              // could be less strict here and allow empty ids, which will then be composed
+                              // from group_prefix + id (say, "LN0", "LN1" and so on); but then, as
+                              // individual <projection>s would have to reference both endpoints by explicit
+                              // ids, it is obviously prone to error to have <instance> ids depend solely on
+                              // their order of appearance.
+                              // So we bark at empty ids.
+                                fprintf( stderr, "<instance> element without an \"id\" attribute near line %u\n", n->line);
+                                return TNMLIOResult::badattr;
+                        }
+
+                        size_t total_len = xmlStrlen( group_prefix) + 1 + xmlStrlen( id_s);  // +1 for the "." separator
+                        if ( total_len >= C_BaseUnit::max_label_size ) {
+                                fprintf( stderr, "Combined label for an <instance> (\"%s%s\") exceeding %zu characters near line %d\n",
+                                         group_prefix, id_s, C_BaseUnit::max_label_size, n->line);
+                                throw TNMLIOResult::biglabel;
+                        }
+                        _longest_label = max(
+                                _longest_label,
+                                (unsigned short)snprintf(
+                                        cell_id, C_BaseUnit::max_label_size-1, "%s.%s",
+                                        group_prefix, id_s));  // here, a new instance is given a name
+                        xmlFree( id_s);
+
+                        if ( !(nin = n->children) )
+                                return retval;
+
+                        for ( ; nin; nin = nin->next ) {
+                                if ( !(nin->type == XML_ELEMENT_NODE &&
+                                       xmlStrEqual( nin->name, BAD_CAST "location")) )
+                                        continue;
+
+                                xmlChar *x_s = xmlGetProp( nin, BAD_CAST "x"),
+                                        *y_s = xmlGetProp( nin, BAD_CAST "y"),
+                                        *z_s = xmlGetProp( nin, BAD_CAST "z");
+                              // here we do actually insert neurons into the model
+                                if ( !(x_s && y_s && z_s) )
+                                        vp( 1, stderr, "<location> element missing full set of coordinates near line %d\n", nin->line);
+                                        // not an error: absent coordinates default to 0
+                                x = x_s ? strtod( (char*)x_s, nullptr) : 0.;
+                                y = y_s ? strtod( (char*)y_s, nullptr) : 0.;
+                                z = z_s ? strtod( (char*)z_s, nullptr) : 0.;
+                                xmlFree( x_s), xmlFree( y_s), xmlFree( z_s);
+
+                                C_BaseNeuron *neu = add_neuron_species(
+                                        (char*)type_s, cell_id,
+                                        TIncludeOption::is_notlast);
+
+                                if ( !neu || neu->_status & CN_UERROR ) {
+                                        if ( neu )
+                                                delete neu;
+                                        fprintf( stderr, "Failed to add a neuron \"%s\" near line %u\n", cell_id, n->line);
+                                        return TNMLIOResult::structerror;
+                                } else {
+                                        neu->_serial_id = _global_unit_id_reservoir++;
+                                        neu->pos = make_tuple( x, y, z);
+                                        ++retval;
+                                }
+                        }
+                }
+        } catch (int ex) {
+                xmlFree( id_s);
+                return ex;
+        }
+
+        return retval;
+}
+
+
+
+
+int
+cnrun::CModel::
+_process_projection_connections(
+        xmlNode *n,
+        const xmlChar *synapse_name,
+        const xmlChar *type_s,
+        const xmlChar *src_grp_prefix,
+        const xmlChar *tgt_grp_prefix)
+{
+        // similar to _process_population_instances, except that we read some more attributes (source and
+        // target units)
+
+        int     retval = 0;  // is also a counter of synapses
+
+        char    //synapse_id [C_BaseUnit::max_label_size],
+                src_s[C_BaseUnit::max_label_size],
+                tgt_s[C_BaseUnit::max_label_size];
+        double  weight;
+
+        C_BaseSynapse *y;
+
+        xmlChar *src_cell_id_s = nullptr,
+                *tgt_cell_id_s = nullptr,
+                *weight_s      = nullptr;
+        try {
+                for ( ; n; n = n->next ) {
+                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "connection") )
+                                continue;
+
+                        src_cell_id_s = xmlGetProp( n, BAD_CAST "pre_cell_id"),
+                        tgt_cell_id_s = xmlGetProp( n, BAD_CAST "post_cell_id"),
+                        weight_s      = xmlGetProp( n, BAD_CAST "weight");
+                        if ( /*!synapse_id_s || */ !src_cell_id_s || !tgt_cell_id_s ) {
+                                fprintf( stderr, "A <connection> element without \"pre_cell_id\" and/or \"post_cell_id\" attribute near line %u\n", n->line);
+                                throw TNMLIOResult::badattr;
+                        }
+
+                        snprintf( src_s, C_BaseUnit::max_label_size-1, "%s.%s", src_grp_prefix, src_cell_id_s);
+                        snprintf( tgt_s, C_BaseUnit::max_label_size-1, "%s.%s", tgt_grp_prefix, tgt_cell_id_s);
+
+                        if ( !weight_s ) {
+                                vp( 3, stderr, "Assuming 0 for a synapse of \"%s.%s\" to \"%s.%s\" without a \"weight\" attribute near line %u\n",
+                                    src_grp_prefix, src_cell_id_s, tgt_grp_prefix, tgt_cell_id_s, n->line);
+                                weight = 0.;
+                        } else
+                                weight = strtod( (char*)weight_s, nullptr);
+                        /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
+                                xmlFree( weight_s);
+
+                        y = add_synapse_species(
+                                (char*)type_s, src_s, tgt_s,
+                                weight,
+                                TSynapseCloningOption::yes,
+                                TIncludeOption::is_notlast);
+
+                        if ( !y || y->_status & CN_UERROR ) {
+                                if ( y )
+                                        delete y;
+                                fprintf( stderr, "Failed to add an \"%s\" synapse from \"%s\" to \"%s\" near line %u\n",
+                                         (char*)type_s, src_s, tgt_s, n->line);
+                                return TNMLIOResult::structerror;
+                        } else
+                                ++retval;
+                }
+
+        } catch (int ex) {
+                /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
+                        xmlFree( weight_s);
+                return ex;
+        }
+
+        return retval;
+}
+
+
+
+int
+cnrun::CModel::
+export_NetworkML( const string& fname)
+{
+        int retval = 0;
+
+        LIBXML_TEST_VERSION;
+
+        fprintf( stderr, "export_NetworkML() not implemented yet\n");
+
+        return retval;
+}
+
+
+#else
+# error Need an XMLREADER-enabled libxml2 (>2.6)
+#endif // LIBXML_READER_ENABLED
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model-struct.cc b/upstream/src/libcnrun/model-struct.cc
new file mode 100644
index 0000000..e747583
--- /dev/null
+++ b/upstream/src/libcnrun/model-struct.cc
@@ -0,0 +1,1042 @@
+/*
+ *       File name:  libcnrun/model-struct.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2008-09-02
+ *
+ *         Purpose:  CModel household.
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <sys/time.h>
+#include <csignal>
+#include <iostream>
+#include <set>
+#include <algorithm>
+#include <functional>
+
+#include "libstilton/string.hh"
+#include "model.hh"
+
+
+using namespace std;
+using namespace cnrun::stilton::str;
+
+
+cnrun::CModel::
+CModel (const string& inname,
+        CIntegrate_base *inintegrator,
+        const SModelOptions& inoptions)
+      : name (inname),
+        options (inoptions),
+        _global_unit_id_reservoir (0l),
+        V (1),
+        W (1),
+        _var_cnt (1),                        // reserve [0] for model_time
+        _cycle (0),
+        _discrete_time (0.),  _discrete_dt (NAN),
+        _dt_logger (nullptr),
+        _spike_logger (nullptr),        // both streams get opened in prepare_advance(), at first write
+        is_ready (false),
+        is_diskless (false),
+        have_ddtb_units (false),
+        _longest_label (1)
+{
+        V[0] = 0.;
+
+        (_integrator = inintegrator) -> model = this;
+
+        {
+                const gsl_rng_type * T;
+                gsl_rng_env_setup();
+                T = gsl_rng_default;
+                if ( gsl_rng_default_seed == 0 ) {
+                        struct timeval tp = { 0L, 0L };
+                        gettimeofday( &tp, nullptr);
+                        gsl_rng_default_seed = tp.tv_usec;
+                }
+                _rng = gsl_rng_alloc( T);
+        }
+
+      // don't abort interpreter with ^C
+        signal( SIGINT, SIG_IGN);
+}
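+
+// Note on reproducibility: gsl_rng_env_setup() honours the GSL_RNG_TYPE and
+// GSL_RNG_SEED environment variables; the gettimeofday() fallback above only
+// applies when no seed was given in the environment.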
+
+
+cnrun::CModel::
+~CModel()
+{
+        vp( 4, "Deleting all units...\n");
+
+        while (units.size())
+                if ( units.back() -> is_owned() )
+                        delete units.back();
+                else
+                        units.pop_back();
+
+        if ( _integrator->is_owned )
+                delete _integrator;
+
+        delete _dt_logger;
+        delete _spike_logger;
+
+        while ( _sources.size() ) {
+                delete _sources.back();
+                _sources.pop_back();
+        }
+
+        gsl_rng_free( _rng);
+}
+
+
+void
+cnrun::CModel::
+reset( TResetOption option)
+{
+        _cycle = 0;
+        V[0] = 0.;
+
+        _integrator->dt = _integrator->_dt_min;
+
+        reset_state_all_units();
+        if ( option == TResetOption::with_params )
+                for_each ( units.begin(), units.end(),
+                           [] (C_BaseUnit* u) { u->reset_params(); });
+
+        regular_periods.clear();
+        regular_periods_last_checked.clear();
+        // this will cause scheduler_update_periods_* to be recomputed by prepare_advance()
+
+        is_ready = false;
+
+        if ( options.log_dt ) {
+                delete _dt_logger;
+                _dt_logger = new ofstream( (name + ".dtlog").data());
+        }
+        if ( options.log_spikers ) {
+                delete _spike_logger;
+                _spike_logger = new ofstream( (name + ".spikes").data());
+        }
+}
+
+
+
+cnrun::C_BaseUnit*
+cnrun::CModel::
+unit_by_label( const string& label) const
+{
+        for ( const auto& U : units )
+                if ( label == U->_label )
+                        return U;
+        return nullptr;
+}
+
+
+cnrun::C_BaseNeuron*
+cnrun::CModel::
+neuron_by_label( const string& label) const
+{
+        for ( const auto& U : units )
+                if ( U->is_neuron() && label == U->label() )
+                        return static_cast<C_BaseNeuron*>(U);
+        return nullptr;
+}
+
+
+cnrun::C_BaseSynapse*
+cnrun::CModel::
+synapse_by_label( const string& label) const
+{
+        for ( const auto& U : units )
+                if ( U->is_synapse() && label == U->label() )
+                        return static_cast<C_BaseSynapse*>(U);
+        return nullptr;
+}
+
+
+
+
+
+// ----- registering units with core lists
+void
+cnrun::CModel::
+_include_base_unit( C_BaseUnit* u)
+{
+        if ( any_of( units.begin(), units.end(),
+                     bind(equal_to<C_BaseUnit*>(), placeholders::_1, u)) )
+                vp( 1, stderr, "Unit %s found already included in model %s\n",
+                    u->_label, name.c_str());
+        else
+                units.push_back( u);
+
+        vp( 5, "  registered base unit %s\n", u->_label);
+
+        if ( u->has_sources() )
+                register_unit_with_sources( u);
+
+        if ( u->is_listening() ) {
+                if ( count( listening_units.begin(), listening_units.end(), u) )
+                        vp( 1, stderr, "Unit \"%s\" already on listening list\n",
+                            u->_label);
+                else
+                        listening_units.push_back( u);
+        }
+
+        u->M = this;
+        u->_serial_id = _global_unit_id_reservoir++;
+}
+
+
+
+
+int
+cnrun::CModel::
+include_unit( C_HostedNeuron *u, const TIncludeOption option)
+{
+        _include_base_unit( u);
+
+        u->idx = _var_cnt;
+        _var_cnt += u->v_no();
+
+        hosted_neurons.push_back( u);
+
+        // if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
+        //         spikelogging_neurons.push_back( u);
+
+        if ( u->is_conscious() )
+                conscious_neurons.push_back( u);
+
+        if ( option == TIncludeOption::is_last )
+                finalize_additions();
+
+        return 0;
+}
+
+int
+cnrun::CModel::
+include_unit( C_HostedSynapse *u, const TIncludeOption option)
+{
+        _include_base_unit( u);
+
+        u->idx = _var_cnt;
+        _var_cnt += u->v_no();
+
+        hosted_synapses.push_back( u);
+
+        if ( u->traits() & UT_MULTIPLEXING )
+                multiplexing_synapses.push_back( u);
+
+        if ( option == TIncludeOption::is_last )
+                finalize_additions();
+
+        return 0;
+}
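+
+// Resulting layout of the integration vector V (a sketch):
+//
+//   V[0]                                    -- model time
+//   V[u->idx] .. V[u->idx + u->v_no() - 1]  -- state variables of hosted unit u,
+//                                              assigned in order of inclusion
+//
+// exclude_unit() below compacts V and shifts the idx of units past the hole.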
+
+
+
+int
+cnrun::CModel::
+include_unit( C_StandaloneNeuron *u)
+{
+        _include_base_unit( u);
+
+        // if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
+        //         spikelogging_neurons.push_back( u);
+
+        if ( u->is_conscious() )
+                conscious_neurons.push_back( u);
+
+        if ( u->is_ddtbound() )
+                ddtbound_neurons.push_back( u);
+        else
+                standalone_neurons.push_back( u);
+
+        return 0;
+}
+
+
+int
+cnrun::CModel::
+include_unit( C_StandaloneSynapse *u)
+{
+/*
+        if ( _check_new_synapse( u) ) {
+//                u->enable( false);
+                u->M = nullptr;
+                return -1;
+        }
+*/
+        _include_base_unit( u);
+
+        if ( u->is_ddtbound() )
+                ddtbound_synapses.push_back( u);
+        else
+                standalone_synapses.push_back( u);
+
+        if ( u->traits() & UT_MULTIPLEXING )
+                multiplexing_synapses.push_back( u);
+
+        return 0;
+}
+
+
+
+// preserve the unit unless option is TExcludeOption::with_delete, so it can be re-included later
+cnrun::C_BaseUnit*
+cnrun::CModel::
+exclude_unit( C_BaseUnit *u, const TExcludeOption option)
+{
+        vp( 5, stderr, "-excluding unit \"%s\"", u->_label);
+
+        if ( u->has_sources() )
+                unregister_unit_with_sources( u);
+
+        if ( u->is_listening() )
+                u->stop_listening();  // also calls unregister_listener
+
+        if ( u->is_synapse() && u->traits() & UT_MULTIPLEXING )
+                multiplexing_synapses.erase( find( multiplexing_synapses.begin(), multiplexing_synapses.end(), u));
+
+        if ( u->is_conscious() )
+                conscious_neurons.erase(
+                        find( conscious_neurons.begin(), conscious_neurons.end(),
+                              u));
+
+        if ( u->is_hostable() ) {
+                size_t  our_idx;
+                if ( u->is_neuron() ) {
+                        hosted_neurons.erase( find( hosted_neurons.begin(), hosted_neurons.end(), u));
+                        our_idx = ((C_HostedNeuron*)u) -> idx;
+                } else {
+                        hosted_synapses.erase( find( hosted_synapses.begin(), hosted_synapses.end(), u));
+                        our_idx = ((C_HostedSynapse*)u) -> idx;
+                }
+
+              // shrink V
+                vp( 5, stderr, " (shrink V by %d)", u->v_no());
+                for ( auto& N : hosted_neurons )
+                        if ( N->idx > our_idx )
+                                N->idx -= u->v_no();
+                for ( auto& Y : hosted_synapses )
+                        if ( Y->idx > our_idx )
+                                Y->idx -= u->v_no();
+                memmove( &V[our_idx], &V[our_idx+u->v_no()],
+                         (_var_cnt - our_idx - u->v_no()) * sizeof(double));
+                V.resize( _var_cnt -= u->v_no());
+        }
+
+        if ( u->is_ddtbound() ) {
+                if ( u->is_neuron() )
+                        ddtbound_neurons.erase( find( ddtbound_neurons.begin(), ddtbound_neurons.end(), u));
+                else
+                        ddtbound_synapses.erase( find( ddtbound_synapses.begin(), ddtbound_synapses.end(), u));
+        }
+
+        if ( !u->is_hostable() ) {
+                if ( u->is_neuron() )
+                        standalone_neurons.remove(
+                                static_cast<C_StandaloneNeuron*>(u));
+                else
+                        standalone_synapses.remove(
+                                static_cast<C_StandaloneSynapse*>(u));
+        }
+
+        units.remove( u);
+
+        if ( option == TExcludeOption::with_delete ) {
+                delete u;
+                u = nullptr;
+        } else
+                u->M = nullptr;
+
+        vp( 5, stderr, ".\n");
+        return u;
+}
+
+
+
+
+
+
+
+// listeners & spikeloggers
+
+void
+cnrun::CModel::
+register_listener( C_BaseUnit *u)
+{
+        if ( not count( listening_units.begin(), listening_units.end(), u) )
+                listening_units.push_back( u);
+}
+
+void
+cnrun::CModel::
+unregister_listener( C_BaseUnit *u)
+{
+        listening_units.remove( u);
+}
+
+
+
+
+
+
+
+void
+cnrun::CModel::
+register_spikelogger( C_BaseNeuron *n)
+{
+        spikelogging_neurons.push_back( n);
+        spikelogging_neurons.sort();
+        spikelogging_neurons.unique();
+}
+
+void
+cnrun::CModel::
+unregister_spikelogger( C_BaseNeuron *n)
+{
+        spikelogging_neurons.remove(
+                static_cast<decltype(spikelogging_neurons)::value_type>(n));
+}
+
+
+
+
+
+// units with sources
+
+void
+cnrun::CModel::
+register_unit_with_sources( C_BaseUnit *u)
+{
+        for ( auto& I : u->_sources )
+                if ( I.source->is_periodic() )
+                        units_with_periodic_sources.push_back( u);
+                else
+                        units_with_continuous_sources.push_back( u);
+        units_with_continuous_sources.unique();
+        units_with_periodic_sources.unique();
+}
+
+void
+cnrun::CModel::
+unregister_unit_with_sources( C_BaseUnit *u)
+{
+        units_with_continuous_sources.remove(
+                static_cast<decltype(units_with_continuous_sources)::value_type>(u));
+        units_with_periodic_sources.remove(
+                static_cast<decltype(units_with_periodic_sources)::value_type>(u));
+}
+
+
+
+
+
+
+
+
+cnrun::C_BaseNeuron*
+cnrun::CModel::
+add_neuron_species( const string& type_s, const string& label,
+                    const TIncludeOption include_option,
+                    const double x, const double y, const double z)
+{
+        TUnitType t = unit_species_by_string( type_s);
+        if ( unlikely (t == NT_VOID || !unit_species_is_neuron(type_s)) ) {
+                fprintf( stderr, "Unrecognised neuron species: \"%s\"\n", type_s.c_str());
+                return nullptr;
+        } else
+                return add_neuron_species( t, label, include_option, x, y, z);
+}
+
+cnrun::C_BaseNeuron*
+cnrun::CModel::
+add_neuron_species( TUnitType type, const string& label,
+                    const TIncludeOption include_option,
+                    double x, double y, double z)
+{
+        C_BaseNeuron *n;
+        switch ( type ) {
+        case NT_HH_D:
+                n = new CNeuronHH_d( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+        case NT_HH_R:
+                n = new CNeuronHH_r( label, x, y, z, this, CN_UOWNED);
+            break;
+
+        case NT_HH2_D:
+                n = new CNeuronHH2_d( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+        // case NT_HH2_R:
+        //         n = new CNeuronHH2_r( label, x, y, z, this, CN_UOWNED, include_option);
+        //     break;
+//#ifdef CN_WANT_MORE_NEURONS
+        case NT_EC_D:
+                n = new CNeuronEC_d( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+        case NT_ECA_D:
+                n = new CNeuronECA_d( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+/*
+        case NT_LV:
+                n = new COscillatorLV( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+ */
+        case NT_COLPITTS:
+                n = new COscillatorColpitts( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+        case NT_VDPOL:
+                n = new COscillatorVdPol( label, x, y, z, this, CN_UOWNED, include_option);
+            break;
+//#endif
+        case NT_DOTPOISSON:
+                n = new COscillatorDotPoisson( label, x, y, z, this, CN_UOWNED);
+            break;
+        case NT_POISSON:
+                n = new COscillatorPoisson( label, x, y, z, this, CN_UOWNED);
+            break;
+
+        case NT_DOTPULSE:
+                n = new CNeuronDotPulse( label, x, y, z, this, CN_UOWNED);
+            break;
+
+        case NT_MAP:
+                n = new CNeuronMap( label, x, y, z, this, CN_UOWNED);
+            break;
+
+        default:
+                return nullptr;
+        }
+        if ( n && n->_status & CN_UERROR ) {
+                delete n;
+                return nullptr;
+        }
+        return n;
+}
+
+
+
+
+
+
+
+
+cnrun::C_BaseSynapse*
+cnrun::CModel::
+add_synapse_species( const string& type_s,
+                     const string& src_l, const string& tgt_l,
+                     const double g,
+                     const TSynapseCloningOption cloning_option,
+                     const TIncludeOption include_option)
+{
+        TUnitType ytype = unit_species_by_string( type_s);
+        bool    given_species = true;
+        if ( ytype == NT_VOID && (given_species = false, ytype = unit_family_by_string( type_s)) == NT_VOID ) {
+                vp( 0, stderr, "Unrecognised synapse species or family: \"%s\"\n", type_s.c_str());
+                return nullptr;
+        }
+
+        C_BaseNeuron
+                *src = neuron_by_label( src_l),
+                *tgt = neuron_by_label( tgt_l);
+        if ( !src || !tgt ) {
+                vp( 0, stderr, "Phony source (\"%s\") or target (\"%s\")\n", src_l.c_str(), tgt_l.c_str());
+                return nullptr;
+        }
+
+        if ( given_species )  // let lower function do the checking
+                return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
+
+        switch ( ytype ) {
+      // catch by first entry in __CNUDT, assign proper species per source and target traits
+        case YT_AB_DD:
+                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+                        ytype = YT_AB_RR;
+                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+                        ytype = YT_AB_RD;
+                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED ) {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXAB_DR;
+                        else
+                                ytype = YT_AB_DR;
+                } else {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXAB_DD;
+                        else
+                                ytype = YT_AB_DD;
+                }
+            break;
+
+        case YT_ABMINUS_DD:
+                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+                        ytype = YT_ABMINUS_RR;
+                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+                        ytype = YT_ABMINUS_RD;
+                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED ) {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXABMINUS_DR;
+                        else
+                                ytype = YT_ABMINUS_DR;
+                } else {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXABMINUS_DD;
+                        else
+                                ytype = YT_ABMINUS_DD;
+                }
+            break;
+
+        case YT_RALL_DD:
+                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+                        ytype = YT_RALL_RR;
+                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+                        ytype = YT_RALL_RD;
+                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED ) {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXRALL_DR;
+                        else
+                                ytype = YT_RALL_DR;
+                } else {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXRALL_DD;
+                        else
+                                ytype = YT_RALL_DD;
+                }
+            break;
+
+        case YT_MAP:
+                if ( src->traits() & UT_DDTSET) {
+                        if ( src->traits() & UT_DOT )
+                                ytype = YT_MXMAP;
+                        else
+                                ytype = YT_MAP;
+                } else {
+                        vp( 0, stderr, "Map synapses can only connect Map neurons\n");
+                        return nullptr;
+                }
+            break;
+        default:
+                vp( 0, stderr, "Bad synapse type: %s\n", type_s.c_str());
+                return nullptr;
+        }
+
+        return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
+}
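+
+// Note on the resolution above (descriptive): when only a family name
+// is given, its first __CNUDT entry (e.g. YT_AB_DD for the AB family)
+// is specialised according to source/target traits, so a dot-spiking
+// source feeding a rate-based target comes out as YT_MXAB_DR.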
+
+
+
+
+cnrun::C_BaseSynapse*
+cnrun::CModel::
+add_synapse_species( TUnitType ytype,
+                     C_BaseNeuron *src, C_BaseNeuron *tgt,
+                     double g,
+                     TSynapseCloningOption cloning_option, TIncludeOption include_option)
+{
+        vp( 5, "add_synapse_species( \"%s\", \"%s\", \"%s\", %g, %d, %d)\n",
+            __CNUDT[ytype].species, src->_label, tgt->_label, g, cloning_option, include_option);
+
+        C_BaseSynapse *y = nullptr;
+
+      // consider cloning
+        if ( cloning_option == TSynapseCloningOption::yes && src->_axonal_harbour.size() )
+                for ( auto& L : src->_axonal_harbour )
+                        if ( L->_type == ytype &&
+                             L->is_not_altered() )
+                                return L->clone_to_target( tgt, g);
+
+        switch ( ytype ) {
+      // the __CNUDT entry at first TUnitType element whose
+      // 'name' matches the type id supplied, captures all cases for a given synapse family
+        case YT_AB_RR:
+                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        y = new CSynapseAB_rr( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+        case YT_AB_RD:
+                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseAB_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "AB_rd not implemented";
+            break;
+        case YT_AB_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseAB_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "AB_dr not implemented";
+            break;
+        case YT_AB_DD:
+                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        y = new CSynapseAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+        case YT_MXAB_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
+                        y = new CSynapseMxAB_dr( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+        case YT_MXAB_DD:
+                if (  !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
+                        y = new CSynapseMxAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+
+
+        case YT_ABMINUS_RR:
+                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseABMINUS_rr( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "ABMINUS_rr not implemented";
+            break;
+        case YT_ABMINUS_RD:
+                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseABMINUS_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "ABMINUS_rd not implemented";
+            break;
+        case YT_ABMINUS_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseABMINUS_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "ABMINUS_dr not implemented";
+            break;
+        case YT_ABMINUS_DD:
+                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        y = new CSynapseABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+        case YT_MXABMINUS_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
+                        // y = new CSynapseMxABMinus_dr( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "MxABMinus_dr not implemented";
+            break;
+        case YT_MXABMINUS_DD:
+                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
+                        // y = new CSynapseMxABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "MxABMinus_dd not implemented";
+            break;
+
+
+        case YT_RALL_RR:
+                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseRall_rr( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "Rall_rr not implemented";
+            break;
+        case YT_RALL_RD:
+                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseRall_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "Rall_rd not implemented";
+            break;
+        case YT_RALL_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+                        // y = new CSynapseRall_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+                        throw "Rall_dr not implemented";
+            break;
+        case YT_RALL_DD:
+                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+                        y = new CSynapseRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
+            break;
+        case YT_MXRALL_DR:
+                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
+                        // y = new CSynapseMxRall_dr( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "MxRall_dr not implemented";
+            break;
+        case YT_MXRALL_DD:
+                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
+                        // y = new CSynapseMxRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
+                        throw "MxRall_dd not implemented";
+            break;
+
+
+        case YT_MAP:
+                if ( src->traits() & UT_DDTSET) {
+                        if ( src->traits() & UT_DOT )
+                                y = new CSynapseMxMap( src, tgt, g, this, CN_UOWNED);
+                        else
+                                y = new CSynapseMap( src, tgt, g, this, CN_UOWNED);
+                } else
+                        throw "Map synapses can only connect Map neurons";
+            break;
+
+        default:
+                return nullptr;
+        }
+
+        if ( !y || y->_status & CN_UERROR ) {
+                if ( y )
+                        delete y;
+                return nullptr;
+        }
+
+        vp( 5, "new synapse \"%s->%s\"\n", y->_label, tgt->label());
+        y->set_g_on_target( *tgt, g);
+
+        return y;
+}
+
+
+void
+cnrun::CModel::
+finalize_additions()
+{
+        V.resize( _var_cnt);
+        W.resize( _var_cnt);
+
+        for ( auto& U : hosted_neurons )
+                U->reset_vars();
+        for ( auto& U : hosted_synapses )
+                U->reset_vars();
+
+        // if ( options.sort_units ) {
+        //         units.sort(
+        //                 [] (C_BaseUnit *&lv, C_BaseUnit *&rv) {
+        //                         return strcmp( lv->label(), rv->label()) < 0;
+        //                 });
+        // }
+
+        _integrator->prepare();
+}
+
+
+void
+cnrun::CModel::
+cull_deaf_synapses()
+{
+      // 1. hosted_synapses is a list, so a deletion invalidates only
+      //    iterators to the deleted node; since units self-erase from
+      //    the lists they are on, advance the iterator before any
+      //    possible deletion;
+      // 2. Skip synapses that have a param reader, scheduler or range
+      //    source attached.
+        auto Yi = hosted_synapses.begin();
+        while ( Yi != hosted_synapses.end() ) {
+                auto& Y = **Yi;
+                ++Yi;
+                if ( Y.has_sources() )
+                        continue;
+                auto Ti = Y._targets.begin();
+                while ( Ti != Y._targets.end() ) {
+                        auto& T = **Ti;
+                        if ( Y.g_on_target( T) == 0 ) {
+                                vp( 3, stderr, " (deleting dendrite to \"%s\" of a synapse \"%s\" with gsyn == 0)\n",
+                                    T._label, Y._label);
+                                T._dendrites.erase( &Y);
+                                Ti = Y._targets.erase( Ti);
+
+                                snprintf( Y._label, C_BaseUnit::max_label_size-1,
+                                          "%s:%zu", Y._source->_label, Y._targets.size());
+                        } else
+                                ++Ti;
+                }
+                if ( Y._targets.empty() )
+                        delete &Y;
+        }
+
+        // older stuff
+/*
+        for_all_synapses_reversed (Y) {
+                int gsyn_pidx = (*Y) -> param_idx_by_sym( "gsyn");
+                if ( ((*Y)->param_schedulers && device_list_concerns_parm( (*Y)->param_schedulers, gsyn_pidx)) ||
+                     ((*Y)->param_readers    && device_list_concerns_parm( (*Y)->param_readers,    gsyn_pidx)) ||
+                     ((*Y)->param_ranges     && device_list_concerns_parm( (*Y)->param_ranges,     gsyn_pidx)) ) {
+                        vp( 2, " (preserving doped synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
+                        continue;
+                }
+                if ( gsyn_pidx > -1 && (*Y)->param_value( gsyn_pidx) == 0. ) {
+                        vp( 2, " (deleting synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
+                        delete (*Y);
+                        ++cnt;
+                }
+        }
+        if ( cnt )
+                vp( 0, "Deleted %zd deaf synapses\n", cnt);
+*/
+}
+
+
+
+// needs to be called after a neuron is put out
+void
+cnrun::CModel::
+cull_blind_synapses()
+{
+      // units remove themselves from all lists, including the one
+      // iterated here, so advance the iterator before deleting
+        auto Yi = hosted_synapses.begin();
+        while ( Yi != hosted_synapses.end() ) {
+                auto& Y = **Yi;
+                ++Yi;
+                if ( !Y._source && !Y.has_sources() ) {
+                        vp( 3, " (deleting synapse with NULL source: \"%s\")\n", Y._label);
+                        delete &Y;  // units are smart, self-erase
+                                    // themselves from our lists
+                }
+        }
+        auto Zi = standalone_synapses.begin();
+        while ( Zi != standalone_synapses.end() ) {
+                auto& Y = **Zi;
+                ++Zi;
+                if ( !Y._source && !Y.has_sources() ) {
+                        vp( 3, " (deleting synapse with NULL source: \"%s\")\n", Y._label);
+                        delete &Y;
+                }
+        }
+}
+
+
+void
+cnrun::CModel::
+reset_state_all_units()
+{
+        for ( auto& U : units )
+                U -> reset_state();
+}
+
+
+
+
+void
+cnrun::CModel::
+coalesce_synapses()
+{
+startover:
+        for ( auto& U1i : units ) {
+                if ( not U1i->is_synapse() )
+                        continue;
+                auto& U1 = *static_cast<C_BaseSynapse*>(U1i);
+                for ( auto& U2i : units ) {
+                        if ( not U2i->is_synapse() )
+                                continue;
+                        auto& U2 = *static_cast<C_BaseSynapse*>(U2i);
+                        if ( &U2 == &U1 )
+                                continue;
+
+                        if ( U1._source == U2._source && U1.is_identical( U2) ) {
+                                vp( 5, "coalescing \"%s\" and \"%s\"\n", U1.label(), U2.label());
+                                for ( auto& T : U2._targets ) {
+                                        U1._targets.push_back( T);
+                                        T->_dendrites[&U1] = T->_dendrites[&U2];
+                                }
+                                snprintf( U1._label, C_BaseUnit::max_label_size-1,
+                                          "%s:%zu", U1._source->label(), U1._targets.size());
+
+                                delete &U2;
+
+                                goto startover;  // because we have messed with both iterators
+                        }
+                }
+        }
+}
+
+
+
+
+
+inline const char*
+__attribute__ ((pure))
+pl_ending( size_t cnt)
+{
+        return cnt == 1 ? "" : "s";
+}
+
+void
+cnrun::CModel::
+dump_metrics( FILE *strm) const
+{
+        fprintf( strm,
+                 "\nModel \"%s\"%s:\n"
+                 "  %5zd unit%s total (%zd Neuron%s, %zd Synapse%s):\n"
+                 "    %5zd hosted,\n"
+                 "    %5zd standalone\n"
+                 "    %5zd discrete dt-bound\n"
+                 "  %5zd Listening unit%s\n"
+                 "  %5zd Spikelogging neuron%s\n"
+                 "  %5zd Unit%s being tuned continuously\n"
+                 "  %5zd Unit%s being tuned periodically\n"
+                 "  %5zd Spontaneously firing neuron%s\n"
+                 "  %5zd Multiplexing synapse%s\n"
+                 " %6zd vars on integration vector\n\n",
+                 name.c_str(), is_diskless ? " (diskless)" : "",
+                 units.size(), pl_ending(units.size()),
+                 n_total_neurons(), pl_ending(n_total_neurons()),
+                 n_total_synapses(), pl_ending(n_total_synapses()),
+                 n_hosted_units(),
+                 n_standalone_units(),
+                 ddtbound_neurons.size() + ddtbound_synapses.size(),
+                 listening_units.size(), pl_ending(listening_units.size()),
+                 spikelogging_neurons.size(), pl_ending(spikelogging_neurons.size()),
+                 units_with_continuous_sources.size(), pl_ending(units_with_continuous_sources.size()),
+                 units_with_periodic_sources.size(), pl_ending(units_with_periodic_sources.size()),
+                 conscious_neurons.size(), pl_ending(conscious_neurons.size()),
+                 multiplexing_synapses.size(), pl_ending(multiplexing_synapses.size()),
+                 _var_cnt-1);
+        if ( have_ddtb_units )
+                fprintf( strm, "Discrete dt: %g msec\n", discrete_dt());
+}
+
+void
+cnrun::CModel::
+dump_state( FILE *strm) const
+{
+        fprintf( strm,
+                 "Model time: %g msec\n"
+                 "Integrator dt_min: %g msec, dt_max: %g msec\n"
+                 "Logging at: %g msec\n\n",
+                 model_time(),
+                 dt_min(), dt_max(),
+                 options.listen_dt);
+}
+
+
+
+void
+cnrun::CModel::
+dump_units( FILE *strm) const
+{
+        fprintf( strm, "\nUnit types in the model:\n");
+
+        set<int> found_unit_types;
+        unsigned p = 0;
+
+        fprintf( strm, "\n===== Neurons:\n");
+        for ( auto& U : units )
+                if ( U->is_neuron() && found_unit_types.count( U->type()) == 0 ) {
+                        found_unit_types.insert( U->type());
+
+                        fprintf( strm, "--- %s: %s\nParameters: ---\n",
+                                 U->species(), U->type_description());
+                        for ( p = 0; p < U->p_no(); ++p )
+                                if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
+                                        fprintf( strm, " %-12s %s %s\n",
+                                                 U->param_sym(p),
+                                                 double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
+                                                 U->param_name(p));
+                        fprintf( strm, "Variables: ---\n");
+                        for ( p = 0; p < U->v_no(); ++p )
+                                if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
+                                        fprintf( strm, "%-12s\t= %s %s\n",
+                                                 U->var_sym(p),
+                                                 double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
+                                                 U->var_name(p));
+                }
+        fprintf( strm, "\n===== Synapses:\n");
+        for ( auto& U : units )
+                if ( U->is_synapse() && found_unit_types.count( U->type()) == 0 ) {
+                        found_unit_types.insert( U->type());
+
+                        fprintf( strm, "--- %s: %s\nParameters: ---\n",
+                                 U->species(), U->type_description());
+                        fprintf( strm, "    parameters:\n");
+                        for ( p = 0; p < U->p_no(); ++p )
+                                if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
+                                        fprintf( strm, "%-12s\t= %s %s\n",
+                                                 U->param_sym(p),
+                                                 double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
+                                                 U->param_name(p));
+                        fprintf( strm, "Variables: ---\n");
+                        for ( p = 0; p < U->v_no(); ++p )
+                                if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
+                                        fprintf( strm, "%-12s\t= %s %s\n",
+                                                 U->var_sym(p),
+                                                 double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
+                                                 U->var_name(p));
+
+                }
+        fprintf( strm, "\n");
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model-tags.cc b/upstream/src/libcnrun/model-tags.cc
new file mode 100644
index 0000000..847529d
--- /dev/null
+++ b/upstream/src/libcnrun/model-tags.cc
@@ -0,0 +1,422 @@
+/*
+ *       File name:  libcnrun/model-tags.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2014-09-25
+ *
+ *         Purpose:  CModel household (process_*_tags() and other methods using regexes).
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <regex.h>
+
+#include "libstilton/string.hh"
+#include "model.hh"
+
+
+using namespace std;
+
+vector<cnrun::C_BaseUnit*>
+cnrun::CModel::
+list_units( const string& label) const
+{
+        vector<C_BaseUnit*> Q;
+
+        regex_t RE;
+        if ( 0 != regcomp( &RE, label.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                vp( 0, stderr, "Invalid regexp in list_units: \"%s\"\n", label.c_str());
+                return Q;
+        }
+
+        for ( auto& U : units )
+                if ( regexec( &RE, U->label(), 0, 0, 0) != REG_NOMATCH )
+                        Q.push_back(U);
+        regfree( &RE);
+
+        return Q;
+}
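+
+// e.g., list_units( "^pyr[0-9]+$") would collect every unit whose label
+// matches that POSIX extended regex (the pattern here is illustrative).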
+
+
+// tags
+
+size_t
+cnrun::CModel::
+process_listener_tags( const list<STagGroupListener> &Listeners)
+{
+        size_t count = 0;
+        regex_t RE;
+        for ( auto& P : Listeners ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_listener_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+                for ( auto& Ui : units ) {
+                        auto& U = *Ui;
+                        if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
+                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
+                                        U.start_listening( P.bits);
+                                        vp( 3, " (unit \"%s\" listening%s)\n",
+                                            U._label, P.bits & CN_ULISTENING_1VARONLY ? ", to one var only" :"");
+                                } else {
+                                        U.stop_listening();
+                                        vp( 3, " (unit \"%s\" not listening)\n", U._label);
+                                }
+                                ++count;
+                        }
+                }
+                regfree( &RE);
+        }
+
+        return count;
+}
+
+
+size_t
+cnrun::CModel::
+process_spikelogger_tags( const list<STagGroupSpikelogger> &Spikeloggers)
+{
+        size_t count = 0;
+        regex_t RE;
+        for ( auto& P : Spikeloggers ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_spikelogger_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+                for ( auto& Ni : standalone_neurons ) {
+                        auto& N = *Ni;
+                        if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
+                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
+                                        bool log_sdf = !(P.period == 0. || P.sigma == 0.);
+                                        if ( ( log_sdf && !N.enable_spikelogging_service(
+                                                       P.period, P.sigma, P.from))
+                                             or
+                                             (!log_sdf && !N.enable_spikelogging_service()) ) {
+                                                vp( 0, stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
+                                                    N._label, N.species());
+                                                continue;
+                                        }
+                                } else
+                                        N.disable_spikelogging_service();
+                                ++count;
+
+                                vp( 3, " (%sabling spike logging for standalone neuron \"%s\")\n",
+                                    (P.invert_option == STagGroup::TInvertOption::no) ? "en" : "dis", N._label);
+                        }
+                }
+                for ( auto& Ni : hosted_neurons ) {
+                        auto& N = *Ni;
+                        if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
+                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
+                                        bool log_sdf = !(P.period == 0. || P.sigma == 0.);
+                                        if ( ( log_sdf && !N.enable_spikelogging_service( P.period, P.sigma, P.from))
+                                             or
+                                             (!log_sdf && !N.enable_spikelogging_service()) ) {
+                                                vp( 1, stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
+                                                    N._label, N.species());
+                                                return -1;
+                                        }
+                                } else
+                                        N.disable_spikelogging_service();
+                                ++count;
+
+                                vp( 3, " (%sabling spike logging for hosted neuron \"%s\")\n",
+                                    (P.invert_option == STagGroup::TInvertOption::no) ? "en" : "dis", N._label);
+                        }
+                }
+                regfree( &RE);
+        }
+
+        return count;
+}
+
+
+size_t
+cnrun::CModel::
+process_putout_tags( const list<STagGroup> &ToRemove)
+{
+        size_t count = 0;
+      // execute some
+        regex_t RE;
+        for ( auto& P : ToRemove ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_putout_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+              // units self-erase from `units' on deletion, so advance
+              // the iterator before deleting
+                auto Ui = units.begin();
+                while ( Ui != units.end() ) {
+                        auto& U = **Ui;
+                        ++Ui;
+                        if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
+                                vp( 2, " (put out unit \"%s\")\n", U._label);
+                                delete &U;
+                                ++count;
+                        }
+                }
+                regfree( &RE);
+        }
+
+        cull_blind_synapses();
+
+        return count;
+}
+
+
+size_t
+cnrun::CModel::
+process_decimate_tags( const list<STagGroupDecimate> &ToDecimate)
+{
+        size_t count = 0;
+      // decimate others
+        regex_t RE;
+        for ( auto& P : ToDecimate ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_decimate_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+
+              // collect group
+                vector<C_BaseUnit*> dcmgroup;
+                for ( auto& U : units )
+                        if ( regexec( &RE, U->_label, 0, 0, 0) == 0 )
+                                dcmgroup.push_back( U);
+                random_shuffle( dcmgroup.begin(), dcmgroup.end());
+
+              // execute
+                size_t  to_execute = rint( dcmgroup.size() * P.fraction), n = to_execute;
+                while ( n-- ) {
+                        delete dcmgroup[n];
+                        ++count;
+                }
+
+                vp( 3, " (decimated %4.1f%% (%zu units) of %s)\n",
+                    P.fraction*100, to_execute, P.pattern.c_str());
+                regfree( &RE);
+
+        }
+
+        cull_blind_synapses();
+
+        return count;
+}
+
+
+
+
+
+
+size_t
+cnrun::CModel::
+process_paramset_static_tags( const list<STagGroupNeuronParmSet> &tags)
+{
+        size_t count = 0;
+        regex_t RE;
+        for ( auto& P : tags ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+
+                vector<string> current_tag_assigned_labels;
+
+                for ( auto& Ui : units ) {
+                        if ( not Ui->is_neuron() )
+                                continue;
+                        auto& N = *static_cast<C_BaseNeuron*>(Ui);
+                        if ( regexec( &RE, N.label(), 0, 0, 0) == REG_NOMATCH )
+                                continue;
+                      // because a named parameter can map to a different param_id in different units, rather
+                      // do lookup every time
+
+                        int p_d = -1;
+                        C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+                        if ( (p_d = N.param_idx_by_sym( P.parm)) != -1 )
+                                kind = C_BaseUnit::SINK_PARAM;
+                        else if ( (p_d = N.var_idx_by_sym( P.parm)) != -1 )
+                                kind = C_BaseUnit::SINK_VAR;
+                        if ( p_d == -1 ) {
+                                vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+                                    N.class_name(), N.label(), N.species(), P.parm.c_str());
+                                continue;
+                        }
+
+                        switch ( kind ) {
+                        case C_BaseUnit::SINK_PARAM:
+                                N.param_value(p_d) = (P.invert_option == STagGroup::TInvertOption::no)
+                                        ? P.value : __CNUDT[N.type()].stock_param_values[p_d];
+                                N.param_changed_hook();
+                            break;
+                        case C_BaseUnit::SINK_VAR:
+                                N.var_value(p_d) = P.value;
+                            break;
+                        }
+                        ++count;
+
+                        current_tag_assigned_labels.push_back( N.label());
+                }
+                regfree( &RE);
+
+                if ( current_tag_assigned_labels.empty() ) {
+                        vp( 1,  stderr, "No neuron labelled matching \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+
+                vp( 3, " set [%s]{%s} = %g\n",
+                    stilton::str::join(current_tag_assigned_labels, ", ").c_str(),
+                    P.parm.c_str(), P.value);
+        }
+
+        return count;
+}
+
+
+
+
+
+size_t
+cnrun::CModel::
+process_paramset_static_tags( const list<STagGroupSynapseParmSet> &tags)
+{
+        size_t count = 0;
+        auto process_tag = [&] (const STagGroupSynapseParmSet& P,
+                                regex_t& REsrc, regex_t& REtgt) -> void {
+                vector<string> current_tag_assigned_labels;
+
+                bool do_gsyn = (P.parm == "gsyn");
+
+                vp( 5, "== setting %s -> %s {%s} = %g...\n",
+                    P.pattern.c_str(), P.target.c_str(), P.parm.c_str(), P.value);
+
+                for ( auto& Uai : units ) {
+                        if ( not Uai->is_neuron() )
+                                continue;
+                        if ( regexec( &REsrc, Uai->label(), 0, 0, 0) == REG_NOMATCH )
+                                continue;
+                        auto& Ua = *static_cast<C_BaseNeuron*>(Uai);
+
+                        for ( auto& Ubi : units ) {
+                                if ( not Ubi->is_neuron() )
+                                        continue;
+                                if ( regexec( &REtgt, Ubi->label(), 0, 0, 0) == REG_NOMATCH ) /* || Ua == Ub */
+                                        continue;
+                                auto& Ub = *static_cast<C_BaseNeuron*>(Ubi);
+                                auto y = Ua.connects_via(Ub);
+                                if ( !y )
+                                        continue;
+
+                                if ( do_gsyn ) {
+                                        y->set_g_on_target( Ub, P.value);
+                                        current_tag_assigned_labels.push_back( y->label());
+                                        continue;
+                                }
+
+                                int p_d = -1;
+                                C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+                                if ( (p_d = y->param_idx_by_sym( P.parm)) > -1 )
+                                        kind = C_BaseUnit::SINK_PARAM;
+                                else if ( (p_d = y->var_idx_by_sym( P.parm)) > -1 )
+                                        kind = C_BaseUnit::SINK_VAR;
+                                if ( p_d == -1 ) {
+                                        vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+                                            y->class_name(), y->label(), y->species(), P.parm.c_str());
+                                        continue;
+                                }
+
+                                switch ( kind ) {
+                                case C_BaseUnit::SINK_PARAM:
+                                        if ( y->_targets.size() > 1 )
+                                                y = y->make_clone_independent(
+                                                        &Ub);  // lest brethren synapses to other targets be clobbered
+                                        y->param_value(p_d) = (P.invert_option == STagGroup::TInvertOption::no)
+                                                ? P.value : __CNUDT[y->type()].stock_param_values[p_d];
+                                        y->param_changed_hook();
+                                    break;
+                                case C_BaseUnit::SINK_VAR:
+                                        y->var_value(p_d) = P.value;
+                                    break;
+                                }
+                                ++count;
+
+                                current_tag_assigned_labels.push_back( y->label());
+                        }
+                }
+                if ( current_tag_assigned_labels.empty() ) {
+                        vp( 1, stderr, "No synapse connecting any of \"%s\" to \"%s\"\n", P.pattern.c_str(), P.target.c_str());
+                        return;
+                }
+
+                vp( 3, " set [%s]{%s} = %g\n",
+                    stilton::str::join(current_tag_assigned_labels, ", ").c_str(),
+                    P.parm.c_str(), P.value);
+        };
+
+        for ( auto& P : tags ) {
+                regex_t REsrc, REtgt;
+                if (0 != regcomp( &REsrc, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB) ) {  // P.pattern acting as src
+                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags (src): \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+                if (0 != regcomp( &REtgt, P.target.c_str(), REG_EXTENDED | REG_NOSUB) ) {
+                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags (tgt): \"%s\"\n", P.target.c_str());
+                        regfree( &REsrc);
+                        continue;
+                }
+
+                process_tag( P, REsrc, REtgt);
+                regfree( &REsrc);
+                regfree( &REtgt);
+        }
+
+        coalesce_synapses();
+
+        return count;
+}
+
+
+
+size_t
+cnrun::CModel::
+process_paramset_source_tags( const list<STagGroupSource> &tags)
+{
+        size_t count = 0;
+        regex_t RE;
+        for ( auto& P : tags ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+                        vp( 0, stderr, "Invalid regexp in process_paramset_source_tags: \"%s\"\n", P.pattern.c_str());
+                        continue;
+                }
+
+                for ( auto& U : units ) {
+                        if ( regexec( &RE, U->label(), 0, 0, 0) == REG_NOMATCH )
+                                continue;
+
+                        int p_d = -1;
+                        C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+                        if ( (p_d = U->param_idx_by_sym( P.parm)) > -1 )
+                                kind = C_BaseUnit::SINK_PARAM;
+                        else if ( (p_d = U->var_idx_by_sym( P.parm)) > -1 )
+                                kind = C_BaseUnit::SINK_VAR;
+                        if ( p_d == -1 ) {
+                                vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+                                    U->class_name(), U->label(), U->species(), P.parm.c_str());
+                                continue;
+                        }
+
+                        if ( P.invert_option == STagGroup::TInvertOption::no ) {
+                                U -> attach_source( P.source, kind, p_d);
+                                vp( 3, "Connected source \"%s\" to \"%s\"{%s}\n",
+                                    P.source->name(), U->label(), P.parm.c_str());
+                        } else {
+                                U -> detach_source( P.source, kind, p_d);
+                                vp( 3, "Disconnected source \"%s\" from \"%s\"{%s}\n",
+                                    P.source->name(), U->label(), P.parm.c_str());
+                        }
+                        ++count;
+                }
+                regfree( &RE);
+        }
+
+        return count;
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model.hh b/upstream/src/libcnrun/model.hh
new file mode 100644
index 0000000..a0468ea
--- /dev/null
+++ b/upstream/src/libcnrun/model.hh
@@ -0,0 +1,715 @@
+/*
+ *       File name:  libcnrun/model.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2008-09-02
+ *
+ *         Purpose:  Main model class.
+ *
+ *         License:  GPL-2+
+ */
+
+/*--------------------------------------------------------------------------
+
+CModel is the wrapper class which takes lists of pointers to neurons and
+synapses, networked into a neural system, and assembles from them a common
+state vector whose derivatives it handles.  At the same time, it serves
+the neurons and synapses their state at any given time, and allows them
+to adjust their parameters.
+
+--------------------------------------------------------------------------*/
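+
+/* A minimal usage sketch (illustrative only: the integrator ctor
+   arguments and the species/label strings below are assumptions, not
+   verbatim cnrun API):
+
+       CModel model ("demo", new CIntegrateRK65 (...), SModelOptions());
+       model.add_neuron_species( "HH", "n1", TIncludeOption::is_notlast);
+       model.add_neuron_species( "HH", "n2", TIncludeOption::is_last);
+       model.add_synapse_species( "AB", "n1", "n2", .02);
+       model.advance( 1000.);  // msec
+*/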
+
+
+#ifndef CNRUN_LIBCN_MODEL_H_
+#define CNRUN_LIBCN_MODEL_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <list>
+#include <vector>
+#include <string>
+
+#include "libxml/parser.h"
+#include "libxml/tree.h"
+
+#include "gsl/gsl_rng.h"
+
+#include "libstilton/misc.hh"
+#include "forward-decls.hh"
+#include "base-neuron.hh"
+#include "base-synapse.hh"
+#include "hosted-neurons.hh"
+#include "hosted-synapses.hh"
+#include "standalone-neurons.hh"
+#include "standalone-synapses.hh"
+#include "integrate-rk65.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+struct SModelOptions {
+        bool    listen_1varonly:1,
+                listen_deferwrite:1,
+                listen_binary:1,
+                log_dt:1,
+                log_spikers:1,
+                log_spikers_use_serial_id:1,
+                log_sdf:1,
+                display_progress_percent:1,
+                display_progress_time:1;
+        int     precision;
+        double  spike_threshold,
+                spike_lapse,
+                listen_dt;
+        double  //discrete_dt,
+                integration_dt_max,
+                integration_dt_min,
+                integration_dt_cap;
+        double  sxf_start_delay,
+                sxf_period,
+                sdf_sigma;
+        int     verbosely;
+
+        SModelOptions ()
+              : listen_1varonly (true), listen_deferwrite (false), listen_binary (false),
+                log_dt (false),
+                log_spikers (false), log_spikers_use_serial_id (false),
+                log_sdf (false),
+                display_progress_percent (true),
+                display_progress_time (false),
+                precision (8),
+                spike_threshold (0.), spike_lapse (3.),
+                listen_dt(1.),
+                //discrete_dt(.5),
+                integration_dt_max (.5), integration_dt_min (1e-5), integration_dt_cap (5.),
+                sxf_start_delay (0.), sxf_period (0.), sdf_sigma (0.),
+                verbosely (1)
+                {}
+
+        SModelOptions (const SModelOptions& rv)
+                {
+                        memmove(this, &rv, sizeof(SModelOptions));
+                }
+};
+
+
+class CModel : public cnrun::stilton::C_verprintf {
+
+    public:
+      // ctor, dtor
+        CModel (const string& name, CIntegrate_base*, const SModelOptions&);
+        virtual ~CModel ();
+
+        string  name;
+
+        SModelOptions
+                options;
+
+      // Unit list and lookup
+        vector<C_BaseUnit*>
+        list_units() const
+                {  return vector<C_BaseUnit*> (units.begin(), units.end());  }
+        vector<C_BaseUnit*>
+        list_units( const string& label) const;
+        C_BaseUnit    *unit_by_label( const string&) const;
+        C_BaseNeuron  *neuron_by_label( const string&) const;
+        C_BaseSynapse *synapse_by_label( const string&) const;
+        unsigned short longest_label() const
+                {  return _longest_label;  }
+
+      // Unit tally
+        size_t n_hosted_units() const
+                {  return hosted_neurons.size() + hosted_synapses.size();                }
+        size_t n_standalone_units() const
+                {  return standalone_neurons.size() + standalone_synapses.size();        }
+        size_t n_ddtbound_units() const
+                {  return ddtbound_neurons.size() + ddtbound_synapses.size();            }
+        size_t n_total_neurons() const
+                {
+                        return hosted_neurons.size()
+                                + standalone_neurons.size()
+                                + ddtbound_neurons.size();
+                }
+        size_t n_total_synapses() const
+                {
+                        return hosted_synapses.size()
+                                + standalone_synapses.size()
+                                + ddtbound_synapses.size();
+                }
+
+      // 0. Model composition
+
+      // There are two ways of adding units:
+      // - create units outside, then 'include' them in a model;
+      // - specify which unit you want, by type, and creating
+      //   them directly in the model ('add').
+
+        //enum class TIncludeOption { is_last, is_notlast, };  // defined in hosted-unit.hh
+        // if option == is_last, do allocations of hosted units' vars immediately
+        // otherwise defer until addition is done with option == is_notlast
+        // or the user calls finalize_additions
+        int include_unit( C_HostedNeuron*, TIncludeOption option = TIncludeOption::is_last);
+        int include_unit( C_HostedSynapse*, TIncludeOption option = TIncludeOption::is_last);
+        int include_unit( C_StandaloneNeuron*);
+        int include_unit( C_StandaloneSynapse*);
+
+        C_BaseNeuron*
+        add_neuron_species( TUnitType, const string& label,
+                            TIncludeOption = TIncludeOption::is_last,
+                            double x = 0., double y = 0., double z = 0.);
+        C_BaseNeuron*
+        add_neuron_species( const string& type, const string& label,
+                            TIncludeOption = TIncludeOption::is_last,
+                            double x = 0., double y = 0., double z = 0.);
+
+        enum class TSynapseCloningOption { yes, no, };
+        C_BaseSynapse*
+        add_synapse_species( const string& type, const string& src, const string& tgt,
+                             double g,
+                             TSynapseCloningOption = TSynapseCloningOption::yes,
+                             TIncludeOption = TIncludeOption::is_last);
+        void finalize_additions();
+
+        C_BaseSynapse*
+        add_synapse_species( TUnitType, C_BaseNeuron *src, C_BaseNeuron *tgt,
+                             double g,
+                             TSynapseCloningOption = TSynapseCloningOption::yes,
+                             TIncludeOption = TIncludeOption::is_last);
+
+        enum class TExcludeOption { with_delete, no_delete, };
+        C_BaseUnit*
+        exclude_unit( C_BaseUnit*, TExcludeOption option = TExcludeOption::no_delete);
+        // returns nullptr if option == with_delete, the excluded unit otherwise, even if it was not owned
+        void delete_unit( C_BaseUnit* u)
+                {  exclude_unit( u, TExcludeOption::with_delete);  }
+
+      // 1. NeuroMl interface
+        enum class TNMLImportOption { merge, reset, };
+        enum TNMLIOResult {
+                ok = 0, nofile, noelem, badattr, badcelltype, biglabel, structerror,
+        };
+        int import_NetworkML( const string& fname, TNMLImportOption);
+        int import_NetworkML( xmlDoc*, const string& fname, TNMLImportOption);  // fname is merely informational here
+        int export_NetworkML( const string& fname);
+        int export_NetworkML( xmlDoc*);
+
+      // 2. Bulk operations
+        enum class TResetOption { with_params, no_params, };
+        void reset( TResetOption = TResetOption::no_params);
+        void reset_state_all_units();
+
+        void cull_deaf_synapses();  // those with gsyn == 0
+        void cull_blind_synapses(); // those with _source == nullptr
+
+      // 3. Informational
+        size_t vars() const  { return _var_cnt; }
+        void dump_metrics( FILE *strm = stdout) const;
+        void dump_state( FILE *strm = stdout) const;
+        void dump_units( FILE *strm = stdout) const;
+
+      // 4. Set unit parameters
+      // high-level functions to manipulate unit behaviour, set params, & connect sources
+        struct STagGroup {
+                string pattern;
+                enum class TInvertOption { yes, no, };
+                TInvertOption invert_option;
+                STagGroup( const string& a,
+                           TInvertOption b = STagGroup::TInvertOption::no)
+                      : pattern (a), invert_option (b)
+                        {}
+        };
+        struct STagGroupListener : STagGroup {
+                int bits;
+                STagGroupListener( const string& a,
+                                   int c = 0,
+                                   STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b), bits (c)
+                        {}
+        };
+        size_t process_listener_tags( const list<STagGroupListener>&);
+
+        struct STagGroupSpikelogger : STagGroup {
+                double period, sigma, from;
+                STagGroupSpikelogger( const string& a,
+                                      double c = 0., double d = 0., double e = 0.,  // defaults disable sdf computation
+                                      STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b), period (c), sigma (d), from (e)
+                        {}
+        };
+        size_t process_spikelogger_tags( const list<STagGroupSpikelogger>&);
+        size_t process_putout_tags( const list<STagGroup>&);
+
+        struct STagGroupDecimate : STagGroup {
+                float fraction;
+                STagGroupDecimate( const string& a, double c)
+                      : STagGroup (a, TInvertOption::no), fraction (c)
+                        {}
+        };
+        size_t process_decimate_tags( const list<STagGroupDecimate>&);
+
+        struct STagGroupNeuronParmSet : STagGroup {
+                string parm;
+                double value;
+                STagGroupNeuronParmSet( const string& a,
+                                        const string& c, double d,
+                                        STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b),
+                        parm (c), value (d)
+                        {}
+        };
+        struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
+                string target;
+                STagGroupSynapseParmSet( const string& a,
+                                         const string& z, const string& c, double d,
+                                         STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroupNeuronParmSet (a, c, d, b), target (z)
+                        {}
+        };
+        size_t process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
+        size_t process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
+
+        struct STagGroupSource : STagGroup {
+                string parm;
+                C_BaseSource *source;
+                STagGroupSource( const string& a,
+                                 const string& c, C_BaseSource *d,
+                                 STagGroup::TInvertOption b = STagGroup::TInvertOption::no)  // TInvertOption::yes detaches the source
+                      :  STagGroup (a, b), parm (c), source (d)
+                        {}
+        };
+        size_t process_paramset_source_tags( const list<STagGroupSource>&);
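+
+        // An illustrative call (the label pattern and parameter symbol
+        // are hypothetical):
+        //
+        //     model.process_paramset_static_tags(
+        //             list<STagGroupNeuronParmSet> {{ "^pyr.*", "gNa", 120. }});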
+
+        C_BaseSource*
+        source_by_id( const string& id) const
+                {
+                        for ( auto& S : _sources )
+                                if ( id == S->name() )
+                                        return S;
+                        return nullptr;
+                }
+        const list<C_BaseSource*>&
+        sources() const
+                {  return _sources;  }
+        void
+        add_source( C_BaseSource* s)
+                {
+                        _sources.push_back( s);
+                }
+        // no (straight) way to delete a source
+
+      // 5. Running
+        unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
+        double model_time() const  { return V[0]; }
+
+        double dt() const      { return _integrator->dt; }
+        double dt_min() const  { return _integrator->_dt_min; }
+        double dt_max() const  { return _integrator->_dt_max; }
+        double dt_cap() const  { return _integrator->_dt_cap; }
+        void set_dt(double v)  { _integrator->dt = v; }
+        void set_dt_min(double v)  { _integrator->_dt_min = v; }
+        void set_dt_max(double v)  { _integrator->_dt_max = v; }
+        void set_dt_cap(double v)  { _integrator->_dt_cap = v; }
+
+        unsigned long cycle()         const { return _cycle;          }
+        double model_discrete_time()  const { return _discrete_time;  }
+        double discrete_dt()          const { return _discrete_dt;    }
+
+      // 9. misc
+        gsl_rng *rng() const
+                {  return _rng;  }
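+        // rng_sample() draws from (0,1), both endpoints excluded, as
+        // per gsl_rng_uniform_pos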
+        double rng_sample() const
+                {
+                        return gsl_rng_uniform_pos( _rng);
+                }
+    private:
+        friend class C_BaseUnit;
+        friend class C_BaseNeuron;
+        friend class C_BaseSynapse;
+        friend class C_HostedNeuron;
+        friend class C_HostedConductanceBasedNeuron;
+        friend class C_HostedRateBasedNeuron;
+        friend class C_HostedSynapse;
+        friend class CNeuronMap;
+        friend class CSynapseMap;
+        friend class CSynapseMxAB_dd;
+        friend class SSpikeloggerService;
+
+        friend class CIntegrate_base;
+        friend class CIntegrateRK65;
+
+      // supporting functions
+        void register_listener( C_BaseUnit*);
+        void unregister_listener( C_BaseUnit*);
+        void register_spikelogger( C_BaseNeuron*);
+        void unregister_spikelogger( C_BaseNeuron*);
+        void register_mx_synapse( C_BaseSynapse*);
+        void unregister_mx_synapse( C_BaseSynapse*);
+
+        void register_unit_with_sources( C_BaseUnit*);
+        void unregister_unit_with_sources( C_BaseUnit*);
+        void _include_base_unit( C_BaseUnit*);
+
+        int _process_populations( xmlNode*);
+        int _process_population_instances( xmlNode*, const xmlChar*, const xmlChar*);
+
+        int _process_projections( xmlNode*);
+        int _process_projection_connections( xmlNode*, const xmlChar*, const xmlChar*,
+                                             const xmlChar *src_grp_prefix,
+                                             const xmlChar *tgt_grp_prefix);
+
+        void _setup_schedulers();
+        void coalesce_synapses();
+        void prepare_advance();
+        unsigned _do_advance_on_pure_hosted( double, double*)  __attribute__ ((hot));
+        unsigned _do_advance_on_pure_standalone( double, double*) __attribute__ ((hot));
+        unsigned _do_advance_on_pure_ddtbound( double, double*) __attribute__ ((hot));
+        unsigned _do_advance_on_mixed( double, double*) __attribute__ ((hot));
+
+        void make_listening_units_tell()
+                {
+                        for ( auto& U : listening_units )
+                                U -> tell();
+                }
+        void make_conscious_neurons_possibly_fire()
+                {
+                        for ( auto& U : conscious_neurons )
+                                U->possibly_fire();
+                }
+        void make_units_with_periodic_sources_apprise_from_sources()
+                {
+                        for ( auto& U : units_with_periodic_sources )
+                                U->apprise_from_sources();
+                }
+        void make_units_with_continuous_sources_apprise_from_sources()
+                {
+                        for ( auto& U : units_with_continuous_sources )
+                                U->apprise_from_sources();
+                }
+        void make_spikeloggers_sync_history()
+                {
+                        for ( auto& N : spikelogging_neurons )
+                                N->sync_spikelogging_history();
+                }
+
+        static double
+        model_time( vector<double> &x)
+                {
+                        return x[0];
+                }
+
+      // contents
+        list<C_BaseUnit*>
+                units; // all units together
+      // these have derivative(), are churned in _integrator->cycle()
+        list<C_HostedNeuron*>
+                hosted_neurons;
+        list<C_HostedSynapse*>
+                hosted_synapses;
+      // these need preadvance() and fixate()
+        list<C_StandaloneNeuron*>
+                standalone_neurons;
+        list<C_StandaloneSynapse*>
+                standalone_synapses;
+      // ... also these, but at discrete dt only
+      // (only the standalone map units currently)
+        list<C_StandaloneNeuron*>
+                ddtbound_neurons;
+        list<C_StandaloneSynapse*>
+                ddtbound_synapses;
+
+      // neurons that can possibly_fire() (various oscillators); they
+      // have no inputs, and hence depend on nothing else
+        list<C_BaseNeuron*>
+                conscious_neurons;
+
+      // sublists maintained so we need not traverse all of units:
+      // listeners, spikeloggers & readers
+        list<C_BaseUnit*>
+                listening_units;
+      // neurons with a meaningful do_spikelogging_or_whatever()
+        list<C_BaseNeuron*>
+                spikelogging_neurons;
+      // `Multiplexing AB' synapses are treated very specially
+        list<C_BaseSynapse*>
+                multiplexing_synapses;
+
+      // units for which apprise_from_sources() is called every cycle
+        list<C_BaseUnit*>
+                units_with_continuous_sources;
+      // same, but not every cycle
+        list<C_BaseUnit*>
+                units_with_periodic_sources;
+        list<double>
+                regular_periods;
+        list<unsigned>
+                regular_periods_last_checked;
+
+        unsigned long
+                _global_unit_id_reservoir;
+
+      // the essential mechanical parts: ----
+      // hosted unit variables
+        vector<double> V,        // catenated var vectors of all constituent neurons and synapses
+                       W;        // V and W alternate as the main state vector, avoiding a memcpy per step
+        size_t  _var_cnt;        // total # of variables (to be) allocated in V and W, plus one for model_time
+
+      // integrator interface
+        CIntegrate_base
+                *_integrator;
+
+        unsigned long
+                _cycle;
+        double  _discrete_time;
+        double  _discrete_dt;
+
+        list<C_BaseSource*>
+                _sources;
+
+        ofstream
+                *_dt_logger,
+                *_spike_logger;
+
+        bool    is_ready:1,
+                is_diskless:1,
+                have_ddtb_units:1;
+
+        unsigned short
+                _longest_label;
+
+        gsl_rng *_rng;
+
+        int verbose_threshold() const
+                {
+                        return options.verbosely;
+                }
+};
+
+
+
+
+
+inline void
+CIntegrateRK65::fixate()
+{
+        swap( model->V, model->W);
+}
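+
+// A note on the V/W double buffer (see the CModel members above):
+// the integrator computes the next state into one vector while the
+// other holds the current state; fixate() then publishes the step by
+// swapping the two, an O(1) operation in place of a memcpy over all
+// state variables.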
+
+
+// various CUnit & CNeuron methods accessing CModel members
+// that we want to have inline
+
+inline double
+C_BaseUnit::model_time() const
+{
+        return M->model_time();
+}
+
+inline void
+C_BaseUnit::pause_listening()
+{
+        if ( !M )
+                throw "pause_listening() called on NULL model";
+        M->unregister_listener( this);
+}
+
+inline void
+C_BaseUnit::resume_listening()
+{
+        if ( !M )
+                throw "resume_listening() called on NULL model";
+        M->register_listener( this);
+}
+
+
+
+template <class T>
+void
+C_BaseUnit::attach_source( T *s, TSinkType t, unsigned short idx)
+{
+        _sources.push_back( SSourceInterface<T>( s, t, idx));
+        M->register_unit_with_sources(this);
+}
+
+
+
+
+
+inline SSpikeloggerService*
+C_BaseNeuron::
+enable_spikelogging_service( int s_mask)
+{
+        if ( !_spikelogger_agent )
+                _spikelogger_agent =
+                        new SSpikeloggerService( this, s_mask);
+        M->register_spikelogger( this);
+        return _spikelogger_agent;
+}
+inline SSpikeloggerService*
+C_BaseNeuron::
+enable_spikelogging_service( double sample_period,
+                             double sigma,
+                             double from, int s_mask)
+{
+        if ( !_spikelogger_agent )
+                _spikelogger_agent =
+                        new SSpikeloggerService( this, sample_period, sigma, from, s_mask);
+        M->register_spikelogger( this);
+        return _spikelogger_agent;
+}
+
+inline void
+C_BaseNeuron::
+disable_spikelogging_service()
+{
+        if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_PERSIST)) {
+                _spikelogger_agent->sync_history();
+                M->unregister_spikelogger( this);
+
+                delete _spikelogger_agent;
+                _spikelogger_agent = nullptr;
+        }
+}
+
+
+
+
+
+
+inline void
+C_HostedNeuron::
+reset_vars()
+{
+        if ( M && idx < M->_var_cnt )
+                memcpy( &M->V[idx],
+                        __CNUDT[_type].stock_var_values,
+                        __CNUDT[_type].vno * sizeof(double));
+}
+
+inline double&
+C_HostedNeuron::
+var_value( size_t v)
+{
+        return M->V[idx + v];
+}
+
+inline const double&
+C_HostedNeuron::
+get_var_value( size_t v) const
+{
+        return M->V[idx + v];
+}
+
+
+
+inline size_t
+C_HostedConductanceBasedNeuron::
+n_spikes_in_last_dt() const
+{
+        return E() >= M->options.spike_threshold;
+}
+
+inline size_t
+C_HostedRateBasedNeuron::
+n_spikes_in_last_dt() const
+{
+        return round(E() * M->dt() * M->rng_sample());
+}
+
+
+inline size_t
+C_StandaloneConductanceBasedNeuron::
+n_spikes_in_last_dt() const
+{
+        return E() >= M->options.spike_threshold;
+}
+
+inline size_t
+C_StandaloneRateBasedNeuron::
+n_spikes_in_last_dt() const
+{
+        return round(E() * M->dt() * M->rng_sample());
+}
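+
+// The two flavours of n_spikes_in_last_dt() above mirror each other:
+// conductance-based units report 0 or 1 according to whether E has
+// reached options.spike_threshold, while rate-based units draw a
+// stochastic spike count from the instantaneous rate E, scaled by dt
+// and a uniform sample.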
+
+
+
+
+
+inline void
+C_HostedSynapse::
+reset_vars()
+{
+        if ( M && M->_var_cnt > idx )
+                memcpy( &M->V[idx],
+                        __CNUDT[_type].stock_var_values,
+                        __CNUDT[_type].vno * sizeof(double));
+}
+
+
+
+inline double&
+C_HostedSynapse::
+var_value( size_t v)
+{
+        return M->V[idx + v];
+}
+
+inline const double&
+C_HostedSynapse::
+get_var_value( size_t v) const
+{
+        return M->V[idx + v];
+}
+
+
+
+inline double
+C_HostedConductanceBasedNeuron::
+E() const
+{
+        return M->V[idx+0];
+}
+
+// F is computed on the fly, so far usually
+
+
+inline double
+C_HostedSynapse::
+S() const
+{
+        return M->V[idx+0];
+}
+
+
+
+
+inline void
+CSynapseMap::
+preadvance()
+{
+        V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_])
+                + (_source->n_spikes_in_last_dt() ? P[_delta_] : 0);
+}
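+
+// Per discrete step, the above implements
+//   S <- S * exp( -dt/tau) + delta * [source spiked in last dt],
+// i.e. exponential decay of the synaptic variable, topped up by
+// delta whenever the presynaptic unit fired.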
+
+
+
+inline void
+CSynapseMxMap::
+preadvance()
+{
+        V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_]) + q() * P[_delta_];
+}
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/mx-attr.hh b/upstream/src/libcnrun/mx-attr.hh
new file mode 100644
index 0000000..e38dd93
--- /dev/null
+++ b/upstream/src/libcnrun/mx-attr.hh
@@ -0,0 +1,59 @@
+/*
+ *       File name:  libcn/mx-attr.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-03-31
+ *
+ *         Purpose:  Interface class for multiplexing units.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_MXATTR_H_
+#define CNRUN_LIBCN_MXATTR_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <vector>
+
+using namespace std;
+
+namespace cnrun {
+
+class C_MultiplexingAttributes {
+
+    protected:
+        virtual void update_queue() = 0;
+        vector<double> _kq;
+
+    public:
+        double  q() const
+                {
+                        return _kq.size();
+                }
+        void reset()
+                {
+                        _kq.clear();
+                }
+};
+
+
+
+class C_DotAttributes {
+    public:
+        virtual double& spikes_fired_in_last_dt() = 0;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/sources.cc b/upstream/src/libcnrun/sources.cc
new file mode 100644
index 0000000..1dcd47d
--- /dev/null
+++ b/upstream/src/libcnrun/sources.cc
@@ -0,0 +1,231 @@
+/*
+ *       File name:  libcn/sources.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2010-02-24
+ *
+ *         Purpose:  External stimulation sources (periodic, tape, noise).
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <cmath>
+#include <sys/time.h>
+#include <iostream>
+#include <fstream>
+#include <limits>
+#include <gsl/gsl_randist.h>
+
+#include "libstilton/string.hh"
+#include "sources.hh"
+
+
+using namespace std;
+
+const char* const
+cnrun::C_BaseSource::
+type_s( TSourceType type)
+{
+        switch ( type ) {
+        case TSourceType::null:       return "Null";
+        case TSourceType::tape:       return "Tape";
+        case TSourceType::periodic:   return "Periodic";
+        case TSourceType::function:   return "Function";
+        case TSourceType::noise:      return "Noise";
+        }
+        return "??";
+}
+
+
+
+cnrun::CSourceTape::
+CSourceTape (const string& name_, const string& fname_, TSourceLoopingOption is_looping_)
+      : C_BaseSource (name_, TSourceType::tape), is_looping (is_looping_),
+        _fname (fname_)
+{
+        ifstream ins (stilton::str::tilda2homedir( _fname).c_str());
+        if ( !ins.good() ) {
+                throw stilton::str::sasprintf(
+                        "Tape source file (\"%s\") not good", fname_.c_str());
+        }
+        skipws(ins);
+
+        while ( !ins.eof() && ins.good() ) {
+                while ( ins.peek() == '#' || ins.peek() == '\n' )
+                        ins.ignore( numeric_limits<streamsize>::max(), '\n');
+                double timestamp, datum;
+                if ( ins >> timestamp >> datum )  // don't push a garbage pair on a failed read at EOF
+                        _values.push_back( pair<double,double>(timestamp, datum));
+        }
+
+        if ( _values.size() == 0 ) {
+                fprintf( stderr, "No usable values in \"%s\"\n", _fname.c_str());
+                return;
+        }
+
+        _I = _values.begin();
+}
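+
+// A tape file this parser accepts (layout inferred from the reads
+// above): '#'-comment and blank lines are skipped, and every other
+// line carries a "<time> <value>" pair, e.g.:
+//
+//   # t      value
+//   0.0      0.0
+//   100.0    0.5
+//   200.0    0.0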
+
+
+void
+cnrun::CSourceTape::
+dump( FILE *strm) const
+{
+        fprintf( strm, "%s (%s) %zu values from %s%s\n",
+                 name(), type_s(),
+                 _values.size(), _fname.c_str(),
+                 (is_looping == TSourceLoopingOption::yes) ? " (looping)" : "");
+}
+
+
+
+
+cnrun::CSourcePeriodic::
+CSourcePeriodic (const string& name_, const string& fname_, TSourceLoopingOption is_looping_,
+                 double period_)
+        : C_BaseSource (name_, TSourceType::periodic), is_looping (is_looping_),
+          _fname (fname_),
+          _period (period_)
+{
+        ifstream ins( stilton::str::tilda2homedir(fname_).c_str());
+        if ( !ins.good() ) {
+                throw stilton::str::sasprintf(
+                        "Periodic source file (\"%s\") not good", fname_.c_str());
+        }
+        skipws(ins);
+
+        while ( ins.peek() == '#' || ins.peek() == '\n' )
+                ins.ignore( numeric_limits<streamsize>::max(), '\n');
+
+        if ( !isfinite(_period) || _period <= 0. ) {
+                ins >> _period;
+                if ( !isfinite(_period) || _period <= 0. ) {
+                        throw stilton::str::sasprintf(
+                                "Period undefined for source \"%s\"", _fname.c_str());
+                }
+        }
+
+        while ( true ) {
+                while ( ins.peek() == '#' || ins.peek() == '\n' )
+                        ins.ignore( numeric_limits<streamsize>::max(), '\n');
+                double datum;
+                ins >> datum;
+                if ( ins.eof() || !ins.good() )
+                        break;
+                _values.push_back( datum);
+        }
+
+        if ( _values.size() < 2 ) {
+                throw stilton::str::sasprintf(
+                        "Need at least 2 scheduled values in \"%s\"\n", _fname.c_str());
+        }
+}
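+
+// A periodic-source file as read above (assumed layout): optional
+// '#'-comment lines, then, if no valid period was passed to the
+// ctor, the period on a line of its own, followed by at least two
+// values, one per period, e.g.:
+//
+//   # pulse train, period supplied in-file
+//   5.0
+//   0.1
+//   0.9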
+
+
+
+void
+cnrun::CSourcePeriodic::
+dump( FILE *strm) const
+{
+        fprintf( strm, "%s (%s) %zu values at %g from %s%s\n",
+                 name(), type_s(),
+                 _values.size(), _period, _fname.c_str(),
+                 (is_looping == TSourceLoopingOption::yes) ? " (looping)" : "");
+}
+
+
+
+void
+cnrun::CSourceFunction::
+dump( FILE *strm) const
+{
+        fprintf( strm, "%s (%s) (function)\n",
+                 name(), type_s());
+}
+
+
+
+const char* const
+cnrun::CSourceNoise::
+distribution_s( TDistribution type)
+{
+        switch ( type ) {
+        case TDistribution::uniform: return "uniform";
+        case TDistribution::gaussian: return "gaussian";
+        }
+        return "??";
+}
+
+
+cnrun::CSourceNoise::TDistribution
+cnrun::CSourceNoise::
+distribution_by_name( const string& s)
+{
+        for ( auto d : {TDistribution::uniform, TDistribution::gaussian} )
+                if ( s == distribution_s( d) )
+                        return d;
+        throw stilton::str::sasprintf( "Invalid distribution name: %s", s.c_str());
+}
+
+
+cnrun::CSourceNoise::
+CSourceNoise (const string& name_,
+              double min_, double max_, double sigma_,
+              TDistribution dist_type_,
+              int seed)
+      : C_BaseSource (name_, TSourceType::noise),
+        _min (min_), _max (max_),
+        _sigma (sigma_),
+        _dist_type (dist_type_)
+{
+        const gsl_rng_type *T;
+        gsl_rng_env_setup();
+        T = gsl_rng_default;
+        if ( seed != 0 )  // honour an explicit seed; otherwise fall back to wall-clock usec
+                gsl_rng_default_seed = seed;
+        else if ( gsl_rng_default_seed == 0 ) {
+                struct timeval tp = { 0L, 0L };
+                gettimeofday( &tp, nullptr);
+                gsl_rng_default_seed = tp.tv_usec;
+        }
+        _rng = gsl_rng_alloc( T);
+}
+
+double
+cnrun::CSourceNoise::
+operator() ( double unused)
+{
+        switch ( _dist_type ) {
+        case TDistribution::uniform:   return gsl_rng_uniform( _rng) * (_max - _min) + _min;
+        case TDistribution::gaussian:  return gsl_ran_gaussian( _rng, _sigma) + (_min + _max)/2;  // centred between min and max
+        }
+        return 42.;
+}
+
+
+cnrun::CSourceNoise::
+~CSourceNoise ()
+{
+        gsl_rng_free( _rng);
+}
+
+
+void
+cnrun::CSourceNoise::
+dump( FILE *strm) const
+{
+        fprintf( strm, "%s (%s) %s in range %g:%g (sigma = %g)\n",
+                 name(), type_s(),
+                 distribution_s(_dist_type), _min, _max, _sigma);
+}
+
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/sources.hh b/upstream/src/libcnrun/sources.hh
new file mode 100644
index 0000000..7caa63c
--- /dev/null
+++ b/upstream/src/libcnrun/sources.hh
@@ -0,0 +1,199 @@
+/*
+ *       File name:  libcn/sources.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2010-02-24
+ *
+ *         Purpose:  External stimulation sources (periodic, tape, noise).
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_SOURCES_H_
+#define CNRUN_LIBCN_SOURCES_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <cstdio>
+#include <string>
+#include <vector>
+#include <gsl/gsl_rng.h>
+
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+enum class TSourceType { null, tape, periodic, function, noise };
+
+class C_BaseSource {
+
+        DELETE_DEFAULT_METHODS (C_BaseSource)
+
+    public:
+        static const char* const type_s( TSourceType);
+
+        C_BaseSource (const string& name_, TSourceType type_)
+              : _name (name_), _type (type_)
+                {}
+        virtual ~C_BaseSource()
+                {}
+
+        const char* name() const
+                {  return _name.c_str();  }
+        TSourceType type() const
+                {  return _type;  }
+        const char* type_s() const
+                {  return type_s(_type);  }
+
+        virtual double operator() ( double)
+                {  return 0.;  }
+        virtual bool is_periodic()
+                {  return false;  }
+
+        bool operator== ( const C_BaseSource &rv) const
+                {  return _name == rv._name;  }
+        bool operator== ( const string& rv) const
+                {  return _name == rv;  }
+
+        virtual void dump( FILE *strm = stdout) const = 0;
+
+    protected:
+        string  _name;
+        TSourceType
+                _type;
+};
+
+
+enum class TSourceLoopingOption { yes, no };
+
+class CSourceTape : public C_BaseSource {
+
+        DELETE_DEFAULT_METHODS (CSourceTape)
+
+    public:
+        CSourceTape (const string& name_, const string& fname_,
+                     TSourceLoopingOption = TSourceLoopingOption::no);
+
+        TSourceLoopingOption is_looping;
+
+        double operator() ( double at)
+                {
+                        while ( next(_I) != _values.end() && next(_I)->first < at )
+                                ++_I;
+
+                        if ( next(_I) == _values.end() && is_looping == TSourceLoopingOption::yes )
+                                _I = _values.begin();
+
+                        return _I->second;
+                }
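+
+        // Sample-and-hold semantics: _I is advanced past every entry
+        // whose timestamp is already behind `at', and the value of the
+        // current entry is held until the next timestamp falls due; a
+        // looping tape rewinds to the beginning once exhausted.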
+
+        void dump( FILE *strm = stdout) const;
+
+    private:
+        string _fname;
+        vector<pair<double, double>> _values;
+        vector<pair<double, double>>::iterator _I;
+};
+
+
+
+class CSourcePeriodic : public C_BaseSource {
+
+        DELETE_DEFAULT_METHODS (CSourcePeriodic)
+
+    public:
+        CSourcePeriodic (const string& name_, const string& fname_,
+                         TSourceLoopingOption,
+                         double period);
+
+        TSourceLoopingOption is_looping;
+
+        double operator() ( double at)
+                {
+                        size_t  i_abs = (size_t)(at / _period),
+                                i_eff = (is_looping == TSourceLoopingOption::yes)
+                                        ? i_abs % _values.size()
+                                        : min (i_abs, _values.size() - 1);
+                        return _values[i_eff];
+                }
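+
+        // The value index is floor( at / _period): a looping source
+        // wraps it modulo the number of values; a non-looping one
+        // clamps at the last value and holds it indefinitely.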
+
+        void dump( FILE *strm = stdout) const;
+
+        bool is_periodic()
+                {  return true;  }
+        double period() const
+                {  return _period;  }
+
+    private:
+        string _fname;
+        vector<double> _values;
+        double _period;
+};
+
+
+
+class CSourceFunction : public C_BaseSource {
+// not useful in Lua
+
+        DELETE_DEFAULT_METHODS (CSourceFunction)
+
+    public:
+        CSourceFunction (const string& name_, double (*function_)(double))
+              : C_BaseSource (name_, TSourceType::function), _function (function_)
+                {}
+
+        double operator() ( double at)
+                {
+                        return _function( at);
+                }
+
+        void dump( FILE *strm = stdout) const;
+
+    private:
+        double (*_function)( double at);
+};
+
+
+
+class CSourceNoise : public C_BaseSource {
+
+        DELETE_DEFAULT_METHODS (CSourceNoise)
+
+    public:
+        enum class TDistribution { uniform, gaussian, };
+        static const char * const distribution_s( TDistribution);
+        static TDistribution distribution_by_name( const string&);
+
+        CSourceNoise (const string& name_, double min_ = 0., double max_ = 1.,
+                      double sigma_ = 1.,
+                      TDistribution = TDistribution::uniform,
+                      int seed = 0);
+       ~CSourceNoise ();
+
+        double operator() ( double unused);
+
+        void dump( FILE *strm = stdout) const;
+
+    private:
+        double _min, _max, _sigma;
+        TDistribution _dist_type;
+        gsl_rng *_rng;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/standalone-attr.hh b/upstream/src/libcnrun/standalone-attr.hh
new file mode 100644
index 0000000..29ccb1d
--- /dev/null
+++ b/upstream/src/libcnrun/standalone-attr.hh
@@ -0,0 +1,56 @@
+/*
+ *       File name:  libcn/standalone-attr.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-03-31
+ *
+ *         Purpose:  Interface class for standalone units.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_STANDALONEATTR_H_
+#define CNRUN_LIBCN_STANDALONEATTR_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <vector>
+
+
+using namespace std;
+namespace cnrun {
+
+class C_StandaloneAttributes {
+
+    friend class CModel;
+    protected:
+        C_StandaloneAttributes (size_t nvars)
+                {
+                        V.resize( nvars);
+                        V_next.resize( nvars);
+                }
+
+        vector<double>
+                V,
+                V_next;
+
+    private:
+        virtual void preadvance()
+                {}
+        void fixate()
+                {  V = V_next;  }
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/standalone-neurons.cc b/upstream/src/libcnrun/standalone-neurons.cc
new file mode 100644
index 0000000..ef9ac54
--- /dev/null
+++ b/upstream/src/libcnrun/standalone-neurons.cc
@@ -0,0 +1,438 @@
+/*
+ *       File name:  libcn/standalone-neurons.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-04-08
+ *
+ *         Purpose:  standalone neurons (those not having state vars
+ *                   on model's integration vector)
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <iostream>
+
+#include "types.hh"
+#include "model.hh"
+#include "standalone-neurons.hh"
+
+
+
+cnrun::C_StandaloneNeuron::
+C_StandaloneNeuron (TUnitType type_, const string& label_,
+                    double x, double y, double z,
+                    CModel *M_, int s_mask)
+      : C_BaseNeuron( type_, label_, x, y, z, M_, s_mask),
+        C_StandaloneAttributes( __CNUDT[type_].vno)
+{
+        reset_vars();
+        if ( M )
+                M->include_unit( this);
+}
+
+
+
+
+
+
+// --------- Rhythm'n'Blues
+
+const char* const cnrun::CN_ParamNames_NeuronHH_r[] = {
+        "a, " CN_PU_FREQUENCY,
+        "I₀, " CN_PU_CURRENT,
+        "r in F(I) = a (I-I₀)^r",
+        "Externally applied DC, " CN_PU_CURRENT,
+};
+const char* const cnrun::CN_ParamSyms_NeuronHH_r[] = {
+        "a",
+        "I0",
+        "r",
+        "Idc",
+};
+const double cnrun::CN_Params_NeuronHH_r[] = {
+        0.185,            //   a,
+        0.0439,           //   I0,
+        0.564,            //   r in F(I) = a * (I-I0)^r
+        0.                // Externally applied DC
+};
+
+
+const char* const cnrun::CN_VarNames_NeuronHH_r[] = {
+        "Spiking rate, " CN_PU_FREQUENCY,
+};
+const char* const cnrun::CN_VarSyms_NeuronHH_r[] = {
+        "F",
+};
+const double cnrun::CN_Vars_NeuronHH_r[] = {
+         0.        // frequency F
+};
+
+
+double
+cnrun::CNeuronHH_r::
+F( vector<double>& x) const
+{
+        double subsq = Isyn(x) - P[_I0_] + P[_Idc_];
+        if ( subsq <= 0. )
+                return 0.;
+        else {
+                return P[_a_] * pow( subsq, P[_r_]);
+        }
+}
+
+void
+cnrun::CNeuronHH_r::
+preadvance()
+{
+        double subsq = Isyn() - P[_I0_] + P[_Idc_];
+//        printf( "%s->Isyn(x) = %g,\tsubsq = %g\n", _label, Isyn(), subsq);
+        if ( subsq <= 0. )
+                V_next[0] = 0;
+        else
+                V_next[0] = P[_a_] * pow( subsq, P[_r_]);
+}
+
+
+
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_OscillatorPoissonDot[] = {
+        "Rate λ, " CN_PU_RATE,
+        "Resting potential, " CN_PU_POTENTIAL,
+        "Potential when firing, " CN_PU_POTENTIAL,
+};
+const char* const cnrun::CN_ParamSyms_OscillatorPoissonDot[] = {
+        "lambda",
+        "Vrst",
+        "Vfir",
+};
+const double cnrun::CN_Params_OscillatorPoissonDot[] = {
+        0.02,        // firing rate Lambda [1/ms]=[10^3 Hz]
+      -60.0,        // input neuron resting potential
+       20.0,        // input neuron potential when firing
+};
+
+const char* const cnrun::CN_VarNames_OscillatorPoissonDot[] = {
+        "Membrane potential, " CN_PU_POTENTIAL,
+        "Spikes recently fired",
+//        "Time"
+};
+const char* const cnrun::CN_VarSyms_OscillatorPoissonDot[] = {
+        "E",
+        "nspk",
+//        "t"
+};
+const double cnrun::CN_Vars_OscillatorPoissonDot[] = {
+        -60.,        // = Vrst, per initialization code found in ctor
+          0,
+//          0.
+};
+
+
+
+namespace {
+#define _THIRTEEN_ 13
+const unsigned long __factorials[_THIRTEEN_] = {
+        1,
+        1, 2, 6, 24, 120,
+        720, 5040, 40320, 362880L, 3628800L,
+        39916800L, 479001600L
+};
+
+inline double
+__attribute__ ((pure))
+factorial( unsigned n)
+{
+        if ( n < _THIRTEEN_ )
+                return __factorials[n];
+        else  // past the lookup table, recurse with the identity n! = n * (n-1)!
+                return n * factorial( n-1);
+}
+}
+
+void
+cnrun::COscillatorDotPoisson::
+possibly_fire()
+{
+        double        lt = P[_lambda_] * M->dt(),
+                dice = M->rng_sample(),
+                probk = 0.;
+
+        unsigned k;
+        for ( k = 0; ; k++ ) {
+                probk += exp( -lt) * pow( lt, (double)k) / factorial(k);
+                if ( probk > dice ) {
+                        nspikes() = k;
+                        break;
+                }
+        }
+
+        if ( k ) {
+                _status |=  CN_NFIRING;
+                var_value(0) = P[_Vfir_];
+        } else {
+                _status &= ~CN_NFIRING;
+                var_value(0) = P[_Vrst_];
+        }
+}
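+
+// The loop above is inverse-CDF sampling of k ~ Poisson(lambda*dt):
+// it accumulates P(K = k) = exp(-lambda*dt) (lambda*dt)^k / k!  over
+// k = 0, 1, ... until the running sum exceeds a uniform [0,1)
+// deviate, then fires exactly k spikes in this dt.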
+
+
+
+void
+cnrun::COscillatorDotPoisson::
+do_detect_spike_or_whatever()
+{
+        unsigned n = n_spikes_in_last_dt();
+        if ( n > 0 ) {
+                for ( unsigned qc = 0; qc < n; qc++ )
+                        _spikelogger_agent->spike_history.push_back( model_time());
+                _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+                _spikelogger_agent->t_last_spike_start = _spikelogger_agent->t_last_spike_end = model_time();
+        } else
+                _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+}
+
+
+
+
+
+
+
+
+
+
+
+const char* const cnrun::CN_ParamNames_OscillatorPoisson[] = {
+        "Rate λ, " CN_PU_RATE,
+        "Input neuron resting potential, " CN_PU_POTENTIAL,
+        "Input neuron potential when firing, " CN_PU_POTENTIAL,
+        "Spike time, " CN_PU_TIME,
+        "Spike time + refractory period, " CN_PU_TIME,
+};
+const char* const cnrun::CN_ParamSyms_OscillatorPoisson[] = {
+        "lambda",
+        "trel",
+        "trel+trfr",
+        "Vrst",
+        "Vfir",
+};
+const double cnrun::CN_Params_OscillatorPoisson[] = {
+        0.02,        // firing rate Lambda [1/ms]=[10^3 Hz]
+        0.0,        // spike time
+        0.0,        // refractory period + spike time
+      -60.0,        // input neuron resting potential
+       20.0,        // input neuron potential when firing
+};
+
+const char* const cnrun::CN_VarNames_OscillatorPoisson[] = {
+        "Membrane potential E, " CN_PU_POTENTIAL,
+};
+const char* const cnrun::CN_VarSyms_OscillatorPoisson[] = {
+        "E",
+};
+const double cnrun::CN_Vars_OscillatorPoisson[] = {
+        -60.,
+};
+
+
+
+void
+cnrun::COscillatorPoisson::
+possibly_fire()
+{
+        if ( _status & CN_NFIRING )
+                if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trel_] ) {
+                        (_status &= ~CN_NFIRING) |= CN_NREFRACT;
+                        _spikelogger_agent->t_last_spike_end = model_time();
+                }
+        if ( _status & CN_NREFRACT )
+                if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trelrfr_] )
+                        _status &= ~CN_NREFRACT;
+
+        if ( !(_status & (CN_NFIRING | CN_NREFRACT)) ) {
+                double lt = P[_lambda_] * M->dt();
+                if ( M->rng_sample() <= exp( -lt) * lt ) {
+                        _status |= CN_NFIRING;
+                        _spikelogger_agent->t_last_spike_start = model_time() /* + M->dt() */ ;
+                }
+        }
+
+//        E() = next_state_E;
+//        next_state_E = (_status & CN_NFIRING) ?P.n.Vfir :P.n.Vrst;
+        var_value(0) = (_status & CN_NFIRING) ?P[_Vfir_] :P[_Vrst_];
+//        if ( strcmp( label, "ORNa.1") == 0 ) cout << label << ": firing_started = " << t_firing_started << ", firing_ended = " << t_firing_ended << " E = " << E() << endl;
+}
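+
+// Outside the firing and refractory windows, a spike is initiated
+// with probability exp(-lambda*dt) * lambda*dt, the Poisson
+// probability of exactly one event in a bin of width dt.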
+
+
+void
+cnrun::COscillatorPoisson::
+do_detect_spike_or_whatever()
+{
+        unsigned n = n_spikes_in_last_dt();
+        if ( n > 0 ) {
+                if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW) ) {
+                        _spikelogger_agent->spike_history.push_back( model_time());
+                        _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+                }
+        } else
+                if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
+                        _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+                        _spikelogger_agent->t_last_spike_end = model_time();
+                }
+}
+
+
+
+
+
+// Map neurons require discrete time
+
+const double cnrun::CN_Params_NeuronMap[] = {
+        60.0,              // 0 - Vspike: spike amplitude factor
+         3.0002440,        // 1 - alpha: "steepness / size" parameter
+        -2.4663490,        // 2 - gamma: "shift / excitation" parameter
+         2.64,             // 3 - beta: input sensitivity
+         0.,               // 4 - Idc: externally applied DC
+// Old comment by TN: beta chosen such that Isyn = 10 "nA" is the threshold for spiking
+};
+const char* const cnrun::CN_ParamNames_NeuronMap[] = {
+        "Spike amplitude factor, " CN_PU_POTENTIAL,
+        "\"Steepness / size\" parameter α",
+        "\"Shift / excitation\" parameter γ",
+        "Input sensitivity β, " CN_PU_RESISTANCE,
+        "External DC, " CN_PU_CURRENT,
+};
+const char* const cnrun::CN_ParamSyms_NeuronMap[] = {
+        "Vspike",
+        "alpha",
+        "gamma",
+        "beta",
+        "Idc"
+};
+
+const double cnrun::CN_Vars_NeuronMap[] = {
+      -50,        // E
+};
+const char* const cnrun::CN_VarNames_NeuronMap[] = {
+        "Membrane potential E, " CN_PU_POTENTIAL
+};
+const char* const cnrun::CN_VarSyms_NeuronMap[] = {
+        "E",
+};
+
+
+cnrun::CNeuronMap::
+CNeuronMap (const string& inlabel, double x, double y, double z, CModel *inM, int s_mask)
+      : C_StandaloneConductanceBasedNeuron( NT_MAP, inlabel, x, y, z, inM, s_mask)
+{
+        if ( inM ) {
+                if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt )
+                        throw "Inappropriate discrete dt";
+
+                inM -> _discrete_dt = fixed_dt;
+        }
+}
+
+
+
+void
+cnrun::CNeuronMap::
+preadvance()
+{
+        double Vspxaxb = P[_Vspike_] * (P[_alpha_] + P[_gamma_]);
+        V_next[0] =
+                ( E() <= 0. )
+                  ? P[_Vspike_] * ( P[_alpha_] * P[_Vspike_] / (P[_Vspike_] - E() - P[_beta_] * (Isyn() + P[_Idc_]))
+                                   + P[_gamma_] )
+                  : ( E() <= Vspxaxb && _E_prev <= 0.)
+                    ? Vspxaxb
+                    : -P[_Vspike_];
+
+        _E_prev = E();
+}
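+
+// The update above is the piecewise map
+//   E' = Vspike * (alpha*Vspike / (Vspike - E - beta*(Isyn+Idc)) + gamma)   if E <= 0,
+//   E' = Vspike * (alpha + gamma)    if 0 < E <= Vspike*(alpha+gamma) and E_prev <= 0,
+//   E' = -Vspike                     otherwise;
+// a spike is thus a one-step excursion to Vspike*(alpha+gamma)
+// followed by a reset to -Vspike.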
+
+
+
+
+
+
+
+// ----- Pulse
+
+const char* const cnrun::CN_ParamNames_NeuronDotPulse[] = {
+        "Frequency f, " CN_PU_FREQUENCY,
+        "Resting potential Vrst, " CN_PU_VOLTAGE,
+        "Firing potential Vfir, " CN_PU_VOLTAGE,
+};
+const char* const cnrun::CN_ParamSyms_NeuronDotPulse[] = {
+        "f",
+        "Vrst",
+        "Vfir",
+};
+const double cnrun::CN_Params_NeuronDotPulse[] = {
+         10,
+        -60,
+         20
+};
+
+const char* const cnrun::CN_VarNames_NeuronDotPulse[] = {
+        "Membrane potential E, " CN_PU_VOLTAGE,
+        "Spikes recently fired",
+};
+const char* const cnrun::CN_VarSyms_NeuronDotPulse[] = {
+        "E",
+        "nspk",
+};
+const double cnrun::CN_Vars_NeuronDotPulse[] = {
+        -60.,        // E
+         0
+};
+
+
+void
+cnrun::CNeuronDotPulse::
+possibly_fire()
+{
+        enum TParametersNeuronDotPulse { _f_, _Vrst_, _Vfir_ };
+
+        spikes_fired_in_last_dt() = floor( (model_time() + M->dt()) * P[_f_]/1000)
+                                  - floor(  model_time()            * P[_f_]/1000);
+
+        if ( spikes_fired_in_last_dt() ) {
+                _status |=  CN_NFIRING;
+                var_value(0) = P[_Vfir_];
+        } else {
+                _status &= ~CN_NFIRING;
+                var_value(0) = P[_Vrst_];
+        }
+
+}
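+
+// spikes_fired_in_last_dt() above counts the multiples of the pulse
+// period 1000/f [ms] falling within (t, t+dt]: with f in Hz and
+// model time in ms, that is floor((t+dt)*f/1000) - floor(t*f/1000).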
+
+void
+cnrun::CNeuronDotPulse::
+param_changed_hook()
+{
+        if ( P[_f_] < 0 ) {
+                M->vp( 0, stderr, "DotPulse oscillator \"%s\" got a negative parameter f: capping at 0\n", _label);
+                P[_f_] = 0.;
+        }
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/standalone-neurons.hh b/upstream/src/libcnrun/standalone-neurons.hh
new file mode 100644
index 0000000..e9168d5
--- /dev/null
+++ b/upstream/src/libcnrun/standalone-neurons.hh
@@ -0,0 +1,255 @@
+/*
+ *       File name:  libcn/standalone-neurons.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  standalone neurons (those not having state vars
+ *                   on model's integration vector)
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_STANDALONENEURONS_H_
+#define CNRUN_LIBCN_STANDALONENEURONS_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
+#include "base-neuron.hh"
+#include "standalone-attr.hh"
+#include "mx-attr.hh"
+
+
+namespace cnrun {
+
+class C_StandaloneNeuron
+  : public C_BaseNeuron, public C_StandaloneAttributes {
+
+        DELETE_DEFAULT_METHODS (C_StandaloneNeuron)
+
+    protected:
+        C_StandaloneNeuron (TUnitType intype, const string& inlabel,
+                            double x, double y, double z,
+                            CModel*, int s_mask);
+
+    public:
+        double &var_value( size_t v)                  {  return V[v];  }
+        const double &get_var_value( size_t v) const  {  return V[v];  }
+        void reset_vars()
+                {
+                        memcpy( V.data(), __CNUDT[_type].stock_var_values,
+                                sizeof(double) * v_no());
+                        memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
+                                sizeof(double) * v_no());
+                }
+};
+
+
+
+class C_StandaloneConductanceBasedNeuron
+  : public C_StandaloneNeuron {
+
+        DELETE_DEFAULT_METHODS (C_StandaloneConductanceBasedNeuron)
+
+    protected:
+        C_StandaloneConductanceBasedNeuron (TUnitType intype, const string& inlabel,
+                                            double inx, double iny, double inz,
+                                            CModel *inM, int s_mask)
+              : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
+                {}
+
+    public:
+        double E() const                        {  return V[0];  }
+        double E( vector<double>&) const        {  return V[0];  }
+
+        size_t n_spikes_in_last_dt() const;
+};
+
+
+class C_StandaloneRateBasedNeuron
+  : public C_StandaloneNeuron {
+
+        DELETE_DEFAULT_METHODS (C_StandaloneRateBasedNeuron)
+
+    protected:
+        C_StandaloneRateBasedNeuron (TUnitType intype, const string& inlabel,
+                                     double inx, double iny, double inz,
+                                     CModel *inM, int s_mask)
+              : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
+                {}
+
+    public:
+        size_t n_spikes_in_last_dt() const;
+};
+
+
+
+
+
+
+
+
+class CNeuronHH_r
+  : public C_StandaloneRateBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronHH_r)
+
+    public:
+        CNeuronHH_r( const string& inlabel,
+                     double x, double y, double z,
+                     CModel *inM, int s_mask = 0)
+              : C_StandaloneRateBasedNeuron( NT_HH_R, inlabel, x, y, z, inM, s_mask)
+                {}
+
+        enum {
+                _a_, _I0_, _r_, _Idc_,
+        };
+
+        double F( vector<double>&) const  __attribute__ ((hot));
+
+        void preadvance() __attribute__ ((hot));
+};
+
+
+
+
+
+
+
+
+class COscillatorPoisson
+  : public C_StandaloneConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (COscillatorPoisson)
+
+    public:
+        COscillatorPoisson( const string& inlabel,
+                            double x, double y, double z,
+                            CModel *inM, int s_mask = 0)
+              : C_StandaloneConductanceBasedNeuron (NT_POISSON, inlabel, x, y, z, inM, s_mask)
+                {
+                      // need _spikelogger_agent's fields even when no spikelogging is done
+                        _spikelogger_agent = new SSpikeloggerService(
+                                static_cast<C_BaseNeuron*>(this),
+                                0 | CN_KL_PERSIST | CN_KL_IDLE);
+                }
+
+        enum {
+                _lambda_, _trel_, _trelrfr_, _Vrst_, _Vfir_,
+        };
+
+        void possibly_fire() __attribute__ ((hot));
+
+        void do_detect_spike_or_whatever() __attribute__ ((hot));
+};
+
+
+
+
+
+
+
+
+
+
+class COscillatorDotPoisson
+  : public C_StandaloneConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (COscillatorDotPoisson)
+
+    public:
+        COscillatorDotPoisson (const string& inlabel,
+                               double x, double y, double z,
+                               CModel *inM, int s_mask = 0)
+              : C_StandaloneConductanceBasedNeuron( NT_DOTPOISSON, inlabel, x, y, z, inM, s_mask)
+                {
+                      // need _spikelogger_agent's fields even when no spikelogging is done
+                        _spikelogger_agent = new SSpikeloggerService(
+                                static_cast<C_BaseNeuron*>(this),
+                                0 | CN_KL_PERSIST | CN_KL_IDLE);
+                }
+
+        enum {
+                _lambda_, _Vrst_, _Vfir_,
+        };
+
+        void do_detect_spike_or_whatever() __attribute__ ((hot));
+
+        void possibly_fire() __attribute__ ((hot));
+
+        size_t n_spikes_in_last_dt() const
+                {  return V[1];  }
+
+        double &nspikes()
+                {  return V[1];  }
+};
+
+
+
+class CNeuronDotPulse
+  : public C_StandaloneConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronDotPulse)
+
+    public:
+        CNeuronDotPulse (const string& inlabel,
+                         double x, double y, double z,
+                         CModel *inM, int s_mask = 0)
+              : C_StandaloneConductanceBasedNeuron (NT_DOTPULSE, inlabel, x, y, z, inM, s_mask)
+                {}
+
+        enum { _f_, _Vrst_, _Vfir_ };
+
+        double &spikes_fired_in_last_dt()
+                {  return V[1];  }
+
+        void possibly_fire();
+
+        void param_changed_hook();
+};
+
+
+
+
+
+
+
+
+
+class CNeuronMap
+  : public C_StandaloneConductanceBasedNeuron {
+
+        DELETE_DEFAULT_METHODS (CNeuronMap)
+
+    public:
+        static const constexpr double fixed_dt = 0.1;
+
+        CNeuronMap (const string& inlabel, double x, double y, double z,
+                    CModel*, int s_mask = 0);
+
+        enum {
+                _Vspike_, _alpha_, _gamma_, _beta_, _Idc_
+        };
+
+        void preadvance();
+        void fixate();
+    private:
+        double _E_prev;
+
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/standalone-synapses.cc b/upstream/src/libcnrun/standalone-synapses.cc
new file mode 100644
index 0000000..f4f1e54
--- /dev/null
+++ b/upstream/src/libcnrun/standalone-synapses.cc
@@ -0,0 +1,99 @@
+/*
+ *       File name:  libcn/standalone-synapses.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2009-04-08
+ *
+ *         Purpose:  standalone synapses.
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <iostream>
+
+#include "types.hh"
+#include "model.hh"
+#include "standalone-synapses.hh"
+
+
+cnrun::C_StandaloneSynapse::
+C_StandaloneSynapse (TUnitType type_,
+                     C_BaseNeuron* source_, C_BaseNeuron* target_,
+                     double g_, CModel* M_, int s_mask)
+      : C_BaseSynapse (type_, source_, target_, g_, M_, s_mask),
+        C_StandaloneAttributes (__CNUDT[type_].vno)
+{
+        reset_vars();
+        if ( M )
+                M->include_unit( this);
+        // else
+        //         _status &= ~CN_UENABLED;
+}
+
+
+
+
+
+const double cnrun::CN_Params_SynapseMap[] = {
+//        0.075,   // (gsyn, commented out as in the name/sym arrays below)
+       18.94463,   // decay rate time constant tau
+        0.25,      // release quantile delta
+        0.         // reversal potential Vrev
+};
+const char* const cnrun::CN_ParamNames_SynapseMap[] = {
+//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
+        "Decay rate time constant τ, " CN_PU_RATE,
+        "Release quantile δ",
+        "Reversal potential Vrev, " CN_PU_POTENTIAL
+};
+const char* const cnrun::CN_ParamSyms_SynapseMap[] = {
+//        "gsyn",
+        "tau",
+        "delta",
+        "Vrev"
+};
+
+
+cnrun::CSynapseMap::
+CSynapseMap (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+             double ing, CModel *inM, int s_mask, TUnitType alt_type)
+      : C_StandaloneSynapse( alt_type, insource, intarget, ing, inM, s_mask),
+        _source_was_spiking (false)
+{
+        if ( !inM )
+                throw "A MxMap synapse is created unattached to a model: preadvance() will cause a segfault!";
+        if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt )
+                throw "Inappropriate discrete dt\n";
+        inM -> _discrete_dt = fixed_dt;
+}
+
+
+
+void
+cnrun::CSynapseMxMap::
+update_queue()
+{
+        size_t k = _source -> n_spikes_in_last_dt();
+        while ( k-- )
+                _kq.push_back( model_time());
+
+        while ( true ) {
+                if ( q() > 0 && model_time() - _kq.front() > P[_tau_] )
+                        _kq.erase( _kq.begin());
+                else
+                        break;
+        }
+}
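+
+// The queue holds one timestamp per presynaptic spike and drops
+// entries older than tau, so q() (the queue length, see
+// C_MultiplexingAttributes) counts the spikes still in effect;
+// CSynapseMxMap::preadvance(), defined inline in model.h, then adds
+// q() * delta to the decaying synaptic variable.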
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/standalone-synapses.hh b/upstream/src/libcnrun/standalone-synapses.hh
new file mode 100644
index 0000000..fa82c92
--- /dev/null
+++ b/upstream/src/libcnrun/standalone-synapses.hh
@@ -0,0 +1,126 @@
+/*
+ *       File name:  libcn/standalone-synapses.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  standalone synapses (those not having state vars
+ *                   on model's integration vector)
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCN_STANDALONESYNAPSES_H_
+#define CNRUN_LIBCN_STANDALONESYNAPSES_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <iostream>
+
+#include "base-synapse.hh"
+#include "base-neuron.hh"
+#include "standalone-attr.hh"
+#include "mx-attr.hh"
+
+
+namespace cnrun {
+
+class CModel;
+
+class C_StandaloneSynapse
+  : public C_BaseSynapse, public C_StandaloneAttributes {
+
+        DELETE_DEFAULT_METHODS (C_StandaloneSynapse)
+
+    protected:
+        C_StandaloneSynapse (TUnitType, C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                             double ing, CModel*, int s_mask = 0);
+
+    public:
+        double &var_value( size_t v)                        { return V[v]; }
+        const double &get_var_value( size_t v) const        { return V[v]; }
+        double  S() const                                   { return V[0]; }
+        double &S( vector<double>&)                         { return V[0];  }
+
+        void reset_vars()
+                {
+                        memcpy( V.data(), __CNUDT[_type].stock_var_values,
+                                sizeof(double) * v_no());
+                        memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
+                                sizeof(double) * v_no());
+                }
+};
+
+
+
+
+
+class CSynapseMap
+  : public C_StandaloneSynapse {
+
+        DELETE_DEFAULT_METHODS (CSynapseMap)
+
+    public:
+        static constexpr double fixed_dt = 0.1;
+
+        CSynapseMap (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                     double ing, CModel*, int s_mask = 0, TUnitType alt_type = YT_MAP);
+
+        void preadvance();  // defined inline in model.h
+
+        enum {
+                _tau_, _delta_, _Esyn_
+        };
+        double Isyn( const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return -g * S() * (with_neuron.E() - P[_Esyn_]);
+                }
+        double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
+                {
+                        return Isyn( with_neuron, g);
+                }
+
+    protected:
+        bool _source_was_spiking;
+};
+
+
+
+
+
+class CSynapseMxMap
+  : public CSynapseMap, public C_MultiplexingAttributes {
+
+        DELETE_DEFAULT_METHODS (CSynapseMxMap)
+
+    public:
+        static constexpr double fixed_dt = 0.1;
+
+        CSynapseMxMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
+                       double ing, CModel *inM, int s_mask = 0)
+              : CSynapseMap( insource, intarget, ing, inM, s_mask, YT_MXMAP)
+                {}
+
+        enum {
+                _tau_, _delta_, _Esyn_
+        };
+        void preadvance();  // defined inline in model.h
+
+    private:
+        friend class CModel;
+        void update_queue();
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/types.cc b/upstream/src/libcnrun/types.cc
new file mode 100644
index 0000000..d815542
--- /dev/null
+++ b/upstream/src/libcnrun/types.cc
@@ -0,0 +1,524 @@
+/*
+ *       File name:  libcn/types.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-09-23
+ *
+ *         Purpose:  CN global unit descriptors
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <cstdio>
+#include <cstring>
+
+#include "libstilton/string.hh"
+#include "types.hh"
+
+
+using namespace std;
+
+cnrun::SCNDescriptor cnrun::__CNUDT[] = {
+
+// ---------------- Neuron types
+
+        { UT_HOSTED,  // NT_HH_D
+          8+18, 4,
+          CN_Params_NeuronHH_d,
+          CN_ParamNames_NeuronHH_d,
+          CN_ParamSyms_NeuronHH_d,
+          CN_Vars_NeuronHH_d,
+          CN_VarNames_NeuronHH_d,
+          CN_VarSyms_NeuronHH_d,
+          "HH",
+          "HH",
+          "Hodgkin-Huxley by Traub and Miles (1991)"
+        },
+
+        { UT_RATEBASED,  // NT_HH_R
+          4, 1,
+          CN_Params_NeuronHH_r,
+          CN_ParamNames_NeuronHH_r,
+          CN_ParamSyms_NeuronHH_r,
+          CN_Vars_NeuronHH_r,
+          CN_VarNames_NeuronHH_r,
+          CN_VarSyms_NeuronHH_r,
+          "HHRate",
+          "HHRate",
+          "Rate-based model of the Hodgkin-Huxley neuron"
+        },
+
+        { UT_HOSTED,  // NT_HH2_D
+          11+18-1, 4,
+          CN_Params_NeuronHH2_d,
+          CN_ParamNames_NeuronHH2_d,
+          CN_ParamSyms_NeuronHH2_d,
+          CN_Vars_NeuronHH2_d,
+          CN_VarNames_NeuronHH_d,
+          CN_VarSyms_NeuronHH_d,
+          "HH2",
+          "HH2",
+          "Hodgkin-Huxley by Traub & Miles w/ K leakage"
+        },
+
+        { UT_RATEBASED | UT__STUB,  // NT_HH2_R
+          0, 0,
+          NULL,          NULL,          NULL,
+          NULL,          NULL,          NULL,
+          "HH2Rate",
+          "HH2Rate",
+          "Rate-based model of the Hodgkin-Huxley by Traub & Miles"
+        },
+
+//#ifdef CN_WANT_MORE_NEURONS
+        { UT_HOSTED,  // NT_EC_D
+          14, 6,
+          CN_Params_NeuronEC_d,
+          CN_ParamNames_NeuronEC_d,
+          CN_ParamSyms_NeuronEC_d,
+          CN_Vars_NeuronEC_d,
+          CN_VarNames_NeuronEC_d,
+          CN_VarSyms_NeuronEC_d,
+          "EC",
+          "EC",
+          "Entorhinal Cortex neuron"
+        },
+
+        { UT_HOSTED,  // NT_ECA_D
+          11, 7,
+          CN_Params_NeuronECA_d,
+          CN_ParamNames_NeuronECA_d,
+          CN_ParamSyms_NeuronECA_d,
+          CN_Vars_NeuronECA_d,
+          CN_VarNames_NeuronECA_d,
+          CN_VarSyms_NeuronECA_d,
+          "ECA",
+          "ECA",
+          "Entorhinal Cortex (A) neuron"
+        },
+//#endif
+
+        { UT_OSCILLATOR | UT_DOT,  // NT_POISSONDOT
+          3, 2,
+          CN_Params_OscillatorPoissonDot,
+          CN_ParamNames_OscillatorPoissonDot,
+          CN_ParamSyms_OscillatorPoissonDot,
+          CN_Vars_OscillatorPoissonDot,
+          CN_VarNames_OscillatorPoissonDot,
+          CN_VarSyms_OscillatorPoissonDot,
+          "DotPoisson",
+          "DotPoisson",
+          "Duration-less spike Poisson oscillator"
+        },
+
+        { UT_OSCILLATOR,  // NT_POISSON
+          5, 1,
+          CN_Params_OscillatorPoisson,
+          CN_ParamNames_OscillatorPoisson,
+          CN_ParamSyms_OscillatorPoisson,
+          CN_Vars_OscillatorPoisson,
+          CN_VarNames_OscillatorPoisson,
+          CN_VarSyms_OscillatorPoisson,
+          "Poisson",
+          "Poisson",
+          "Poisson oscillator"
+        },
+
+/*
+        { UT_HOSTED | UT_OSCILLATOR,  // NT_LV
+          1, 2,
+          CN_Params_OscillatorLV,
+          CN_ParamNames_OscillatorLV,
+          CN_ParamSyms_OscillatorLV,
+          CN_Vars_OscillatorLV,
+          CN_VarNames_OscillatorLV,
+          CN_VarSyms_OscillatorLV,
+          "LV",
+          "LV",
+          "Lotka-Volterra oscillator"
+        },
+*/
+
+        { UT_HOSTED | UT_OSCILLATOR,  // NT_COLPITTS,
+          4, 3,
+          CN_Params_OscillatorColpitts,
+          CN_ParamNames_OscillatorColpitts,
+          CN_ParamSyms_OscillatorColpitts,
+          CN_Vars_OscillatorColpitts,
+          CN_VarNames_OscillatorColpitts,
+          CN_VarSyms_OscillatorColpitts,
+          "Colpitts",
+          "Colpitts",
+          "Colpitts oscillator"
+        },
+
+        { UT_HOSTED | UT_OSCILLATOR,  // NT_VDPOL,
+          2, 2,
+          CN_Params_OscillatorVdPol,
+          CN_ParamNames_OscillatorVdPol,
+          CN_ParamSyms_OscillatorVdPol,
+          CN_Vars_OscillatorVdPol,
+          CN_VarNames_OscillatorVdPol,
+          CN_VarSyms_OscillatorVdPol,
+          "VdPol",
+          "VdPol",
+          "Van der Pol oscillator"
+        },
+
+        { UT_OSCILLATOR | UT_DOT,  // NT_DOTPULSE
+          3, 2,
+          CN_Params_NeuronDotPulse,
+          CN_ParamNames_NeuronDotPulse,
+          CN_ParamSyms_NeuronDotPulse,
+          CN_Vars_NeuronDotPulse,
+          CN_VarNames_NeuronDotPulse,
+          CN_VarSyms_NeuronDotPulse,
+          "DotPulse",
+          "DotPulse",
+          "Dot Pulse generator"
+        },
+
+        { UT_DDTSET,  // NT_MAP
+          5, 1,
+          CN_Params_NeuronMap,
+          CN_ParamNames_NeuronMap,
+          CN_ParamSyms_NeuronMap,
+          CN_Vars_NeuronMap,
+          CN_VarNames_NeuronMap,
+          CN_VarSyms_NeuronMap,
+          "NMap",
+          "NMap",
+          "Map neuron"
+        },
+
+// ---------------- Synapse types
+
+// a proper synapse (of eg AB type) will be selected based on whether
+// its source/target is rate-based or discrete
+
+        { UT_HOSTED,  // YT_AB_DD
+          5, 1,
+          CN_Params_SynapseAB_dd,
+          CN_ParamNames_SynapseAB_dd,
+          CN_ParamSyms_SynapseAB_dd,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "AB",
+          "AB_pp",
+          "Alpha-Beta synapse (Destexhe, Mainen, Sejnowsky, 1994)"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_AB_DR
+          5, 1,
+          NULL,          NULL,          NULL,
+          NULL,          NULL,          NULL,
+          "AB",
+          "AB_pt",
+          "Alpha-Beta synapse (phasic->tonic)"
+        },
+
+        { UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_AB_RD
+          5, 1,
+          NULL,          NULL,          NULL,
+          NULL,          NULL,          NULL,
+          "AB",
+          "AB_tp",
+          "Alpha-Beta synapse (tonic->phasic)"
+        },
+
+        { UT_HOSTED | UT_RATEBASED,  // YT_AB_RR
+          4, 1,
+          CN_Params_SynapseAB_rr,
+          CN_ParamNames_SynapseAB_rr,
+          CN_ParamSyms_SynapseAB_rr,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "AB",
+          "AB_tt",
+          "Alpha-Beta synapse (tonic->tonic)"
+        },
+
+        { UT_HOSTED | UT_MULTIPLEXING,  // YT_MXAB_DD, inheriting all parameters except alpha, and variables from YT_AB
+          5, 1,
+          CN_Params_SynapseMxAB_dd,
+          CN_ParamNames_SynapseAB_dd,
+          CN_ParamSyms_SynapseAB_dd,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "AB",
+          "AB_Mx_pp",
+          "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->phasic)"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING,  // YT_MXAB_DR
+          5, 1,
+          CN_Params_SynapseMxAB_dr,
+          CN_ParamNames_SynapseAB_dr,
+          CN_ParamSyms_SynapseAB_dr,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "AB",
+          "AB_Mx_pt",
+          "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->tonic)"
+        },
+
+
+        { UT_HOSTED,  // YT_ABMINS_DD
+          5, 1,
+          CN_Params_SynapseABMinus_dd,
+          CN_ParamNames_SynapseAB_dd,
+          CN_ParamSyms_SynapseAB_dd,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_pp",
+          "Alpha-Beta synapse w/out (1-S) term"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_ABMINS_DR
+          5, 1,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_pt",
+          "Alpha-Beta synapse w/out (1-S) term (phasic->tonic)"
+        },
+
+        { UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_ABMINS_RD
+          5, 1,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_tp",
+          "Alpha-Beta synapse w/out (1-S) term (tonic->phasic)"
+        },
+
+        { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB,  // YT_ABMINS_RR
+          5, 1,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_tt",
+          "Alpha-Beta synapse w/out (1-S) term (tonic->tonic)"
+        },
+
+        { UT_HOSTED | UT_MULTIPLEXING | UT__STUB,  // YT_MXABMINUS_DD
+          5, 1,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_Mx_pp",
+          "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->phasic)"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB,  // YT_MXABMINUS_DR
+          5, 1,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "ABMinus",
+          "ABMinus_Mx_pt",
+          "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->tonic)"
+        },
+
+
+        { UT_HOSTED,  // YT_RALL_DD
+          3, 2,
+          CN_Params_SynapseRall_dd,
+          CN_ParamNames_SynapseRall_dd,
+          CN_ParamSyms_SynapseRall_dd,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_pp",
+          "Rall synapse (Rall, 1967)"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_RALL_DR
+          3, 2,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_pt",
+          "Rall synapse (Rall, 1967) (phasic->tonic)"
+        },
+
+        { UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_RALL_RD
+          3, 2,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_tp",
+          "Rall synapse (tonic->phasic)"
+        },
+
+        { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB,  // YT_RALL_RR
+          3, 2,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_tt",
+          "Rall synapse (tonic->tonic)"
+        },
+
+        { UT_HOSTED | UT_MULTIPLEXING | UT__STUB,  // YT_MXRALL_DD
+          3, 2,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_Mx_pp",
+          "Rall synapse for use with durationless units as source (phasic->phasic)"
+        },
+
+        { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB,  // YT_MXRALL_DR
+          3, 2,
+          NULL,          NULL,          NULL,
+          CN_Vars_SynapseRall,
+          CN_VarNames_SynapseRall,
+          CN_VarSyms_SynapseRall,
+          "Rall",
+          "Rall_Mx_pt",
+          "Rall synapse for use with durationless units as source (phasic->tonic)"
+        },
+
+
+        { UT_DDTSET,  // YT_MAP
+          3, 1,
+          CN_Params_SynapseMap,
+          CN_ParamNames_SynapseMap,
+          CN_ParamSyms_SynapseMap,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "Map",
+          "Map",
+          "Map synapse"
+        },
+
+        { UT_DDTSET | UT_MULTIPLEXING,  // YT_MXMAP
+          3, 1,
+          CN_Params_SynapseMap,
+          CN_ParamNames_SynapseMap,
+          CN_ParamSyms_SynapseMap,
+          CN_Vars_SynapseAB,
+          CN_VarNames_SynapseAB,
+          CN_VarSyms_SynapseAB,
+          "Map",
+          "Map_Mx",
+          "Multiplexing Map synapse"
+        },
+};
+
+
+
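+// Both lookups below do a linear scan over __CNUDT, which is indexed by
+// TUnitType; NT_FIRST..YT_LAST must stay contiguous for the scan to be
+// exhaustive.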
+cnrun::TUnitType
+cnrun::
+unit_family_by_string( const string& id)
+{
+        for ( int i = NT_FIRST; i <= YT_LAST; ++i )
+                if ( id == __CNUDT[i].family )
+                        return (TUnitType)i;
+        return NT_VOID;
+}
+
+cnrun::TUnitType
+cnrun::
+unit_species_by_string( const string& id)
+{
+        for ( int i = NT_FIRST; i <= YT_LAST; ++i )
+                if ( id == __CNUDT[i].species )
+                        return (TUnitType)i;
+        return NT_VOID;
+}
+
+
+
+
+void
+cnrun::
+cnmodel_dump_available_units()
+{
+        using cnrun::stilton::str::double_dot_aligned_s;
+
+        size_t u, p;
+        printf( "\n===== Neurons:\n");
+        for ( u = NT_FIRST; u <= NT_LAST; ++u ) {
+                const auto &U = __CNUDT[u];
+                if ( U.traits & UT__STUB )
+                        continue;
+                printf( "* [%s] %s:\n",
+                        U.species, U.description);
+                for ( p = 0; p < U.pno; ++p ) {
+                        printf( "   %-12s %s  %s\n",
+                                U.stock_param_syms[p],
+                                double_dot_aligned_s(
+                                        U.stock_param_values[p], 4, 8).c_str(),
+                                U.stock_param_names[p]);
+                }
+                for ( p = 0; p < U.vno; ++p ) {
+                        printf( "v: %-12s %s  %s\n",
+                                U.stock_var_syms[p],
+                                double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
+                                U.stock_var_names[p]);
+                }
+                printf( "\n");
+        }
+        printf( "\n===== Synapses:\n");
+        for ( u = YT_FIRST; u <= YT_LAST; ++u ) {
+                SCNDescriptor &U = __CNUDT[u];
+                if ( U.traits & UT__STUB )
+                        continue;
+                printf( "* [%s] %s:\n",
+                        U.species, U.description);
+                for ( p = 0; p < U.pno; ++p ) {
+                        printf( "   %-12s %s  %s\n",
+                                U.stock_param_syms[p],
+                                double_dot_aligned_s(
+                                        U.stock_param_values[p], 4, 8).c_str(),
+                                U.stock_param_names[p]);
+                }
+                for ( p = 0; p < U.vno; ++p ) {
+                        printf( "v: %-12s %s  %s\n",
+                                U.stock_var_syms[p],
+                                double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
+                                U.stock_var_names[p]);
+                }
+                printf( "\n");
+        }
+        printf( "\n");
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/types.hh b/upstream/src/libcnrun/types.hh
new file mode 100644
index 0000000..084ce38
--- /dev/null
+++ b/upstream/src/libcnrun/types.hh
@@ -0,0 +1,298 @@
+/*
+ *       File name:  libcnrun/types.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  Enumerated type for unit ids, and a structure describing a unit type.
+ *
+ *         License:  GPL-2+
+ */
+
+//#define CN_WANT_MORE_NEURONS
+
+#ifndef CNRUN_LIBCN_TYPES_H_
+#define CNRUN_LIBCN_TYPES_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+using namespace std;
+
+namespace cnrun {
+
+enum TUnitType {
+      // neuron types
+        NT_VOID = -1,
+
+        NT_HH_D,
+        NT_HH_R,
+        NT_HH2_D,
+        NT_HH2_R,
+//#ifdef CN_WANT_MORE_NEURONS
+        NT_EC_D,
+        NT_ECA_D,
+//#endif
+        NT_DOTPOISSON,
+        NT_POISSON,
+//#ifdef CN_WANT_MORE_NEURONS
+//        NT_LV,
+        NT_COLPITTS,
+        NT_VDPOL,
+//#endif
+        NT_DOTPULSE,
+        NT_MAP,
+
+      // synapse types
+        YT_AB_DD,
+        YT_AB_DR,
+        YT_AB_RD,
+        YT_AB_RR,
+        YT_MXAB_DD,
+        YT_MXAB_DR,
+
+        YT_ABMINUS_DD,
+        YT_ABMINUS_DR,
+        YT_ABMINUS_RD,
+        YT_ABMINUS_RR,
+        YT_MXABMINUS_DD,
+        YT_MXABMINUS_DR,
+
+        YT_RALL_DD,
+        YT_RALL_DR,
+        YT_RALL_RD,
+        YT_RALL_RR,
+        YT_MXRALL_DD,
+        YT_MXRALL_DR,
+
+        YT_MAP,
+        YT_MXMAP,
+};
+
+#define NT_FIRST NT_HH_D
+#define NT_LAST  NT_MAP
+#define YT_FIRST YT_AB_DD
+#define YT_LAST  YT_MXMAP
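+
+// These range markers drive the linear scans in unit_family_by_string(),
+// unit_species_by_string() and cnmodel_dump_available_units(); keep them
+// in sync when extending TUnitType.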
+
+
+
+// traits, used to ensure units being connected are compatible
+#define UT_HOSTED        (1 << 0)
+#define UT_DDTSET        (1 << 1)
+#define UT_OSCILLATOR    (1 << 2)
+#define UT_RATEBASED     (1 << 3)
+#define UT_SRCISRATE     UT_RATEBASED
+#define UT_TGTISRATE     (1 << 4)
+#define UT_DOT           (1 << 5)
+#define UT_MULTIPLEXING  UT_DOT
+#define UT__STUB         (1 << 15)
+
+struct SCNDescriptor {
+        int     traits;
+        unsigned short
+                pno, vno;
+        const double *const  stock_param_values;
+        const char   *const *stock_param_names;
+        const char   *const *stock_param_syms;
+        const double *const  stock_var_values;
+        const char   *const *stock_var_names;
+        const char   *const *stock_var_syms;
+        const char   *family,
+                     *species;
+        const char   *description;
+};
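+
+// pno and vno give the element counts of the stock_param_* and stock_var_*
+// arrays, respectively; UT__STUB entries may leave those pointers NULL.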
+
+TUnitType unit_family_by_string( const string&) __attribute__ ((pure));
+TUnitType unit_species_by_string( const string&) __attribute__ ((pure));
+
+inline bool
+unit_species_is_valid( const string& id)
+{
+        return unit_species_by_string(id) != NT_VOID;
+}
+
+inline bool
+unit_species_is_neuron( const string& id)
+{
+        TUnitType t = unit_species_by_string(id);
+        return t >= NT_FIRST && t <= NT_LAST;
+}
+
+inline bool
+unit_species_is_synapse( const string& id)
+{
+        TUnitType t = unit_species_by_string(id);
+        return t >= YT_FIRST && t <= YT_LAST;
+}
+
+inline bool
+unit_family_is_neuron( const string& id)
+{
+        TUnitType t = unit_family_by_string(id);
+        return t >= NT_FIRST && t <= NT_LAST;
+}
+
+inline bool
+unit_family_is_synapse( const string& id)
+{
+        TUnitType t = unit_family_by_string(id);
+        return t >= YT_FIRST && t <= YT_LAST;
+}
+
+extern SCNDescriptor __CNUDT[];
+
+void cnmodel_dump_available_units();
+
+
+
+extern const double CN_Params_NeuronHH_d[];
+extern const char* const CN_ParamNames_NeuronHH_d[];
+extern const char* const CN_ParamSyms_NeuronHH_d[];
+extern const double CN_Vars_NeuronHH_d[];
+extern const char* const CN_VarNames_NeuronHH_d[];
+extern const char* const CN_VarSyms_NeuronHH_d[];
+
+extern const double CN_Params_NeuronHH2_d[];
+extern const char* const CN_ParamNames_NeuronHH2_d[];
+extern const char* const CN_ParamSyms_NeuronHH2_d[];
+extern const double CN_Vars_NeuronHH2_d[];
+
+
+extern const double CN_Params_NeuronHH_r[];
+extern const char* const CN_ParamNames_NeuronHH_r[];
+extern const char* const CN_ParamSyms_NeuronHH_r[];
+extern const double CN_Vars_NeuronHH_r[];
+extern const char* const CN_VarNames_NeuronHH_r[];
+extern const char* const CN_VarSyms_NeuronHH_r[];
+
+
+extern const double CN_Params_NeuronDotPulse[];
+extern const char* const CN_ParamNames_NeuronDotPulse[];
+extern const char* const CN_ParamSyms_NeuronDotPulse[];
+extern const double CN_Vars_NeuronDotPulse[];
+extern const char* const CN_VarNames_NeuronDotPulse[];
+extern const char* const CN_VarSyms_NeuronDotPulse[];
+
+
+//#ifdef CN_WANT_MORE_NEURONS
+extern const double CN_Params_NeuronEC_d[];
+extern const char* const CN_ParamNames_NeuronEC_d[];
+extern const char* const CN_ParamSyms_NeuronEC_d[];
+extern const double CN_Vars_NeuronEC_d[];
+extern const char* const CN_VarNames_NeuronEC_d[];
+extern const char* const CN_VarSyms_NeuronEC_d[];
+
+
+extern const double CN_Params_NeuronECA_d[];
+extern const char* const CN_ParamNames_NeuronECA_d[];
+extern const char* const CN_ParamSyms_NeuronECA_d[];
+extern const double CN_Vars_NeuronECA_d[];
+extern const char* const CN_VarNames_NeuronECA_d[];
+extern const char* const CN_VarSyms_NeuronECA_d[];
+//#endif
+
+extern const double CN_Params_NeuronMap[];
+extern const char* const CN_ParamNames_NeuronMap[];
+extern const char* const CN_ParamSyms_NeuronMap[];
+extern const double CN_Vars_NeuronMap[];
+extern const char* const CN_VarNames_NeuronMap[];
+extern const char* const CN_VarSyms_NeuronMap[];
+
+
+extern const double CN_Params_OscillatorPoissonDot[];
+extern const char* const CN_ParamNames_OscillatorPoissonDot[];
+extern const char* const CN_ParamSyms_OscillatorPoissonDot[];
+extern const double CN_Vars_OscillatorPoissonDot[];
+extern const char* const CN_VarNames_OscillatorPoissonDot[];
+extern const char* const CN_VarSyms_OscillatorPoissonDot[];
+
+extern const double CN_Params_OscillatorPoisson[];
+extern const char* const CN_ParamNames_OscillatorPoisson[];
+extern const char* const CN_ParamSyms_OscillatorPoisson[];
+extern const double CN_Vars_OscillatorPoisson[];
+extern const char* const CN_VarNames_OscillatorPoisson[];
+extern const char* const CN_VarSyms_OscillatorPoisson[];
+
+
+/*
+extern const double CN_Params_OscillatorLV[];
+extern const char* const CN_ParamNames_OscillatorLV[];
+extern const char* const CN_ParamSyms_OscillatorLV[];
+extern const double CN_Vars_OscillatorLV[];
+extern const char* const CN_VarNames_OscillatorLV[];
+extern const char* const CN_VarSyms_OscillatorLV[];
+*/
+
+extern const double CN_Params_OscillatorColpitts[];
+extern const char* const CN_ParamNames_OscillatorColpitts[];
+extern const char* const CN_ParamSyms_OscillatorColpitts[];
+extern const double CN_Vars_OscillatorColpitts[];
+extern const char* const CN_VarNames_OscillatorColpitts[];
+extern const char* const CN_VarSyms_OscillatorColpitts[];
+
+
+extern const double CN_Params_OscillatorVdPol[];
+extern const char* const CN_ParamNames_OscillatorVdPol[];
+extern const char* const CN_ParamSyms_OscillatorVdPol[];
+extern const double CN_Vars_OscillatorVdPol[];
+extern const char* const CN_VarNames_OscillatorVdPol[];
+extern const char* const CN_VarSyms_OscillatorVdPol[];
+//#endif
+
+
+
+extern const double CN_Params_SynapseAB_dd[];
+extern const char* const CN_ParamNames_SynapseAB_dd[];
+extern const char* const CN_ParamSyms_SynapseAB_dd[];
+extern const double CN_Vars_SynapseAB[];
+extern const char* const CN_VarNames_SynapseAB[];
+extern const char* const CN_VarSyms_SynapseAB[];
+
+extern const double CN_Params_SynapseABMinus_dd[];
+
+extern const double CN_Params_SynapseMxAB_dd[];
+
+extern const char* const CN_ParamNames_SynapseAB_dr[];
+extern const char* const CN_ParamSyms_SynapseAB_dr[];
+
+extern const double CN_Params_SynapseMxAB_dr[];
+
+extern const double CN_Params_SynapseAB_rr[];
+extern const char* const CN_ParamNames_SynapseAB_rr[];
+extern const char* const CN_ParamSyms_SynapseAB_rr[];
+
+
+extern const double CN_Params_SynapseRall_dd[];
+extern const char* const CN_ParamNames_SynapseRall_dd[];
+extern const char* const CN_ParamSyms_SynapseRall_dd[];
+extern const double CN_Vars_SynapseRall[];
+extern const char* const CN_VarNames_SynapseRall[];
+extern const char* const CN_VarSyms_SynapseRall[];
+
+
+extern const double CN_Params_SynapseMap[];
+extern const char* const CN_ParamNames_SynapseMap[];
+extern const char* const CN_ParamSyms_SynapseMap[];
+
+#define CN_PU_CONDUCTANCE "μS/cm²"
+#define CN_PU_RESISTANCE "MΩ"
+#define CN_PU_POTENTIAL "mV"
+#define CN_PU_VOLTAGE "mV"
+#define CN_PU_CURRENT "nA"
+#define CN_PU_CAPACITY_DENSITY "μF/cm²"
+#define CN_PU_TIME "msec"
+#define CN_PU_TIME_MSEC "msec"
+#define CN_PU_RATE "1/msec"
+#define CN_PU_FREQUENCY "Hz"
+#define CN_PU_TIME_SEC "sec"
+
+}
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libstilton/Makefile.am b/upstream/src/libstilton/Makefile.am
index 75f6417..c8c9aad 100644
--- a/upstream/src/libstilton/Makefile.am
+++ b/upstream/src/libstilton/Makefile.am
@@ -1,27 +1,22 @@
 include $(top_srcdir)/src/Common.mk
+AM_CXXFLAGS += -shared -fPIC
 
-pkglib_LTLIBRARIES = \
-	libstilton.la
+noinst_LIBRARIES = \
+	liba.a
 
-libstilton_la_SOURCES = \
+liba_a_SOURCES = \
 	alg.hh \
 	containers.hh \
 	lang.hh \
-	exprparser.hh \
-	exprparser.cc \
-	libcommon.cc
+	misc.hh \
+	string.hh \
+	libstilton.cc
 
-libstilton_la_LDFLAGS = \
-	-avoid-version \
-	-rpath $(libdir)/$(PACKAGE) \
-	-shared -module
+stiltonincdir = $(includedir)/libstilton
 
-if DO_PCH
-BUILT_SOURCES = \
-	alg.hh.gch \
-	containers.hh.gch \
-	lang.hh.gch \
-	exprparser.hh.gch
-
-CLEANFILES = $(BUILT_SOURCES)
-endif
+stiltoninc_HEADERS = \
+	alg.hh \
+	containers.hh \
+	lang.hh \
+	misc.hh \
+	string.hh
diff --git a/upstream/src/libstilton/alg.hh b/upstream/src/libstilton/alg.hh
index f24cba2..aa97b09 100644
--- a/upstream/src/libstilton/alg.hh
+++ b/upstream/src/libstilton/alg.hh
@@ -9,8 +9,8 @@
  *         License:  GPL
  */
 
-#ifndef _CNRUN_LIBSTILTON_ALG_H
-#define _CNRUN_LIBSTILTON_ALG_H
+#ifndef CNRUN_LIBSTILTON_ALG_H_
+#define CNRUN_LIBSTILTON_ALG_H_
 
 
 #if HAVE_CONFIG_H && !defined(VERSION)
@@ -20,6 +20,7 @@
 using namespace std;
 
 namespace cnrun {
+namespace stilton {
 namespace alg {
 
 /// uncomment on demand
@@ -91,6 +92,7 @@ value_within( const T& v, const T& l, const T& h)
 // for more, check this file in Aghermann
 
 } // namespace alg
+} // namespace stilton
 } // namespace cnrun
 
 #endif
diff --git a/upstream/src/libstilton/containers.hh b/upstream/src/libstilton/containers.hh
index eb3bfb6..df25c56 100644
--- a/upstream/src/libstilton/containers.hh
+++ b/upstream/src/libstilton/containers.hh
@@ -9,17 +9,18 @@
  *         License:  GPL
  */
 
-#ifndef _CNRUN_LIBSTILTON_CONTAINERS_H
-#define _CNRUN_LIBSTILTON_CONTAINERS_H
+#ifndef CNRUN_LIBSTILTON_CONTAINERS_H_
+#define CNRUN_LIBSTILTON_CONTAINERS_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
 
 #include <list>
 #include <forward_list>
 #include <vector>
 #include <map>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
+#include <algorithm>
 
 using namespace std;
 
@@ -30,21 +31,21 @@ template <typename T>
 bool
 member( const T& x, const list<T>& v)
 {
-        return any( v.begin(), v.end(), x);
+        return find( v.begin(), v.end(), x) != v.end();
 }
 
 template <typename T>
 bool
 member( const T& x, const forward_list<T>& v)
 {
-        return any( v.begin(), v.end(), x);
+        return find( v.begin(), v.end(), x) != v.end();
 }
 
 template <typename T>
 bool
 member( const T& x, const vector<T>& v)
 {
-        return any( v.begin(), v.end(), x);
+        return find( v.begin(), v.end(), x) != v.end();
 }
 
 template <typename K, typename V>
diff --git a/upstream/src/libstilton/exprparser.cc b/upstream/src/libstilton/exprparser.cc
deleted file mode 100644
index fc7d9f6..0000000
--- a/upstream/src/libstilton/exprparser.cc
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-12-02
- *
- * Expression parser
- */
-
-
-#include <cstdlib>
-#include <cstdio>
-#include <array>
-#include <iostream>
-
-#include "exprparser.hh"
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-
-using namespace std;
-
-
-const char* const cnrun::__exparse_error_strings[] = {
-	"",
-	"Missing operand",
-	"Unbalanced parentheses",
-	"Unparsable value or missing operator",
-	"Unary operator used as binary",
-	"Undefined variable",
-	"Non-lvalue in assignment",
-	"varlist is NULL"
-};
-
-
-
-
-enum TOperator {
-	OP_VOID = -1,
-	OP_NEG,
-	OP_UNARYMINUS,
-	OP_MULT,	OP_DIV,
-	OP_ADD,		OP_SUBTRACT,
-	OP_LT,		OP_GT,
-	OP_ASSIGN,
-
-	OP_LE,		OP_GE,		OP_EQ,
-};
-
-struct SOp {
-	char	literal[4];
-	int	prio;
-	bool	assoc_ltr,
-		is_binary;
-
-	SOp( const char *l, int p, bool a, bool b)
-	      : prio (p), assoc_ltr (a), is_binary (b)
-		{ strncpy( literal, l, 3); }
-
-	bool isat( const char *where)
-		{ return (strncmp( where, literal, strlen( literal)) == 0); }
-};
-
-
-#define n_ops 12
-
-inline namespace {
-array<SOp, n_ops> Ops = {
-	{
-		SOp("!", 1, false, false),
-		SOp("-", 1, false, false),
-		SOp("*", 3, true, true),	SOp("/", 3, true, true),
-		SOp("+", 5, true, true),	SOp("-", 5, true, true),
-		SOp("<", 7, true, true),	SOp(">", 7, true, true),
-		SOp("=", 9, false, true),
-
-		SOp("<=", 7, true, true),	SOp(">=", 7, true, true),	SOp("==", 7, true, true)
-	}
-};
-} // inline namespace
-
-
-
-cnrun::TExprParserError
-cnrun::CExpression::
-_do_parse( const char *str, double& parsed, list<SVariable> *varlist)
-{
-	if ( !str ) {
-		parsed = 0;
-		return status = EXPARSE_OK;
-	}
-
-	parsed = NAN;
-	_var = "";
-
-	string	workbuf( str);
-	char	*p = &workbuf[0];
-
-	p += strspn( p, " \t");
-	if ( !*p ) {
-		parsed = 0;
-		return status = EXPARSE_EMPTY;
-	}
-
-	char	*expr1 = p,
-		*expr2 = nullptr;
-	TExprParserError subexpr_retval;
-
-      // determine subexpressions, if any, at top level
-	int	level = 0;
-	char	*tl_op_at = nullptr;
-	TOperator
-		tl_op = OP_VOID;
-	bool	last_token_was_operator = true;
-
-//	cerr << "\nPARSE \"" << p << "\"\n";
-	while ( *p ) {
-		if ( *p == eol_comment_delim ) {
-			*p = '\0';
-			break;
-		}
-		if      ( *p == '(' )	level++;
-		else if ( *p == ')' )	level--;
-
-		if ( level < 0 )
-			return status = EXPARSE_UNBALANCED;
-		if ( level > 0 || isspace( *p) )
-			goto end_detect;
-
-	      // detect exponent (e-4)
-		if ( strncasecmp( p, "e-", 2) == 0 ) {
-			p++;
-			goto end_detect;
-		}
-	      // serve the case of unary -: part one
-		if ( *p == '-' && last_token_was_operator ) {
-			char *pp = p;
-			while ( pp > &workbuf[0] && !isspace(*pp) )  pp--; // pp++;
-//			cerr << "  (checking \"" << pp << "\"";
-			char *tp;
-			if ( strtod( pp, &tp) )
-				;
-			if ( tp > p ) { // we have indeed read a number
-//				cerr << "  parsed a number up to \"" << tp<< "\")\n";
-				p = tp - 1;
-				last_token_was_operator = false;
-				goto end_detect;
-			}
-//			cerr << " not a number)\n";
-		}
-
-		int o;
-		for ( o = n_ops-1; o >= 0; o-- ) // check for multibyte operators first (those are at end)
-			if ( Ops[o].isat( p) ) {
-				char *pp = p;
-				p += strlen( Ops[o].literal) - 1; // anticipate general p++
-
-				if ( o == OP_SUBTRACT && last_token_was_operator ) {
-//					cerr << "override\n";
-					o = OP_UNARYMINUS;
-				} else
-					if ( !last_token_was_operator && !Ops[o].is_binary ) {
-//					cerr << " ...at \"" << pp << "\" with op " << Ops[o].literal << endl;
-						if ( !silent ) fprintf( stderr, "Unary %s used after an operand\n", Ops[o].literal);
-						return status = EXPARSE_UNASSOC;
-					}
-
-				if ( tl_op == OP_VOID ||
-				     (Ops[o].assoc_ltr && Ops[tl_op].prio <= Ops[o].prio) ||
-				     (!Ops[o].assoc_ltr && Ops[tl_op].prio < Ops[o].prio) ) {
-//					cerr << "current tlop: " << Ops[o].literal << endl;
-					tl_op_at = pp;
-					tl_op = (TOperator)o;
-				}
-				last_token_was_operator = true;
-				goto end_detect;
-			}
-
-		last_token_was_operator = false;
-
-	end_detect:
-		p++;
-	}
-//	cerr << "tlop is " << Ops[tl_op].literal << endl;
-
-	if ( level > 0 ) {
-		if ( !silent ) fprintf( stderr, "Expression lacks some `)''\n");
-		return status = EXPARSE_UNBALANCED;
-	}
-
-	list<SVariable>::iterator V;
-
-	if ( tl_op != OP_VOID ) {
-		*tl_op_at = '\0';
-		expr2 = tl_op_at + strlen( Ops[tl_op].literal);
-		double opd1, opd2;
-
-//		cerr << "parsing [" << expr1 << "] "<< Ops[tl_op].literal << " [" << expr2 << "]\n";
-
-	      // second subexpr must always be good
-		subexpr_retval = _do_parse( expr2, opd2, varlist);
-		if ( subexpr_retval )
-			return status = subexpr_retval;
-
-	      // first subexpr must be empty, but only in the case of OP_NEG
-		subexpr_retval = _do_parse( expr1, opd1, varlist);
-
-		switch ( subexpr_retval ) {
-		case EXPARSE_OK:
-			break;
-		case EXPARSE_EMPTY:
-			if ( !Ops[tl_op].is_binary ) {
-//				cerr << "was a unary op\n";
-				break;
-			} else
-				return subexpr_retval;
-		case EXPARSE_UNDEFVAR:
-			if ( tl_op == OP_ASSIGN )
-				break;
-			else {
-				// have it reported here (in deeper _do_parse where it is flagged), we don't know yet
-				// if an undefined var is going to be defined
-				if ( !silent ) fprintf( stderr, "Undefined variable `%s'\n", strtok( expr1, " \t"));
-				return status = subexpr_retval;
-			}
-		      break;
-		default:
-			return subexpr_retval;
-		}
-
-		switch ( tl_op ) {
-		case OP_VOID:	break;
-		case OP_UNARYMINUS:	parsed = -opd2;		break;
-		case OP_ADD:		parsed = opd1 + opd2;	break;
-		case OP_SUBTRACT:	parsed = opd1 - opd2;	break;
-		case OP_MULT:		parsed = opd1 * opd2;	break;
-		case OP_DIV:		parsed = opd1 / opd2;	break;
-		case OP_LT:		parsed = opd1 < opd2;	break;
-		case OP_LE:		parsed = opd1 <= opd2;	break;
-		case OP_GT:		parsed = opd1 > opd2;	break;
-		case OP_GE:		parsed = opd1 >= opd2;	break;
-		case OP_EQ:		parsed = opd1 == opd2;	break;
-		case OP_NEG:		parsed = !opd2;		break;
-		case OP_ASSIGN:
-			if ( !varlist ) {
-				if ( !silent ) fprintf( stderr, "Variable assignment reqires a user varlist\n");
-				return status = EXPARSE_VARLISTNULL;
-			}
-			if ( _var == "" ) {
-				if ( !silent ) fprintf( stderr, "Non-lvalue in assignment\n");
-				return status = EXPARSE_NONLVAL;
-			}
-			parsed = opd2;
-			for ( V = varlist->begin(); V != varlist->end(); V++ )
-				if ( strcmp( V->name, _var.c_str()) == 0 ) { // _var has been cached by a previous call to _do_parse
-					V->value = opd2;
-					toplevel_op = tl_op;
-					return status = EXPARSE_OK;
-				}
-			varlist->push_back( SVariable( _var.c_str(), opd2));
-		    break;
-		}
-		toplevel_op = tl_op;
-		return status = EXPARSE_OK;
-	}
-
-      // single expression, possibly in parentheses
-	if ( *expr1 == '(' ) {
-		*strrchr( ++expr1, ')') = '\0';  // parentheses have been checked in the by-char parser loop above
-		return _do_parse( expr1, parsed, varlist);
-	}
-
-      // bare expression
-	expr1 = strtok( expr1, " \t");
-	char *tailp;
-	parsed = strtod( expr1, &tailp);
-	if ( tailp == nullptr || strspn( tailp, " \t\n\r;") == strlen( tailp) )   // digits followed by whitespace
-		return status = EXPARSE_OK;
-
-	if ( tailp == expr1 && varlist ) { // no digits at front: check if that's a variable
-		for ( V = varlist->begin(); V != varlist->end(); V++ ) {
-			if ( strcmp( V->name, expr1) == 0 ) {
-				parsed = V->value;
-				_var = V->name;
-				return status = EXPARSE_OK;
-			}
-		}
-		_var = expr1;  // possibly to be assigned in caller; parsed remains NAN
-		return status = EXPARSE_UNDEFVAR;
-	}
-
-      // some digits followed by rubbish
-	return status = EXPARSE_BAD;
-}
-
-
-// EOF
diff --git a/upstream/src/libstilton/exprparser.hh b/upstream/src/libstilton/exprparser.hh
deleted file mode 100644
index 9620d88..0000000
--- a/upstream/src/libstilton/exprparser.hh
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-12-02
- *
- * An expression parser
- */
-
-#ifndef CNAUX_EXPRPARSER_H
-#define CNAUX_EXPRPARSER_H
-
-#include <cmath>
-#include <cstring>
-#include <string>
-#include <list>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-namespace cnrun {
-
-using namespace std;
-
-
-enum TExprParserError {
-	EXPARSE_OK = 0,
-	EXPARSE_EMPTY,
-	EXPARSE_UNBALANCED,
-	EXPARSE_BAD,
-	EXPARSE_UNASSOC,
-	EXPARSE_UNDEFVAR,
-	EXPARSE_NONLVAL,
-	EXPARSE_VARLISTNULL
-};
-
-
-#define STILTON_MAX_VAR_LEN	32
-
-struct SVariable {
-	char name[STILTON_MAX_VAR_LEN];
-	double value;
-	SVariable( const char *inname, double invalue = NAN)
-	      : value (invalue)
-		{
-			strncpy( name, inname, STILTON_MAX_VAR_LEN-1);
-		}
-	bool operator== ( const SVariable &rv) const
-		{
-			return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == 0;
-		}
-	bool operator< ( const SVariable &rv) const
-		{
-			return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == -1;
-		}
-};
-
-extern const char *const __exparse_error_strings[];
-
-class CExpression {
-
-    public:
-	TExprParserError status;
-
-	CExpression()
-	      : status (EXPARSE_OK), silent (false), eol_comment_delim ('#'), toplevel_op (' '), _parsed_value (NAN)
-		{}
-	const char *error_string() const
-		{	return __exparse_error_strings[status];		}
-
-	double operator() ( const char *str, list<SVariable> *varlist = nullptr)
-		{	return ( _do_parse( str, _parsed_value, varlist) == EXPARSE_OK )
-				? _parsed_value : NAN;				}
-	int operator() ( const char *str, double& parsed, list<SVariable> *varlist = nullptr)
-		{	_do_parse( str, parsed, varlist);
-			return status;						}
-
-	bool	silent;
-	char	eol_comment_delim;
-	char	toplevel_op;
-
-	const char *status_s() const
-		{	return __exparse_error_strings[status];		}
-
-    private:
-	double	_parsed_value;
-	string	_var;
-//	string	_source_str;
-	TExprParserError _do_parse( const char *str, double& parsed, list<SVariable>*);
-};
-
-
-}
-
-#endif
-
-// EOF
diff --git a/upstream/src/libstilton/lang.hh b/upstream/src/libstilton/lang.hh
index eb941ec..da77173 100644
--- a/upstream/src/libstilton/lang.hh
+++ b/upstream/src/libstilton/lang.hh
@@ -9,8 +9,8 @@
  *         License:  GPL
  */
 
-#ifndef _CNRUN_LIBSTILTON_LANG_H
-#define _CNRUN_LIBSTILTON_LANG_H
+#ifndef CNRUN_LIBSTILTON_LANG_H_
+#define CNRUN_LIBSTILTON_LANG_H_
 
 #if HAVE_CONFIG_H && !defined(VERSION)
 #  include "config.h"
@@ -23,7 +23,8 @@
 
 using namespace std;
 
-namespace agh {
+namespace cnrun {
+namespace stilton {
 
 // for functions to suppress some possibly benign exceptions:
 enum class TThrowOption {
@@ -72,10 +73,10 @@ inline int dbl_cmp( double x, double y)  // optional precision maybe?
 #define unlikely(x)      __builtin_expect (!!(x), 0)
 
 
-#define FABUF printf( __FILE__ ":%d (%s): %s\n", __LINE__, __FUNCTION__, __buf__);
 #define FAFA printf( __FILE__ ":%d (%s): fafa\n", __LINE__, __FUNCTION__);
 
-} // namespace agh
+} // namespace stilton
+} // namespace cnrun
 
 #endif
 
diff --git a/upstream/src/libstilton/libcommon.cc b/upstream/src/libstilton/libstilton.cc
similarity index 70%
rename from upstream/src/libstilton/libcommon.cc
rename to upstream/src/libstilton/libstilton.cc
index d1344b5..06b7e60 100644
--- a/upstream/src/libstilton/libcommon.cc
+++ b/upstream/src/libstilton/libstilton.cc
@@ -9,29 +9,55 @@
  *         License:  GPL
  */
 
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
 
 #include <cmath>
 #include <cstring>
 #include <string>
 #include <list>
 
-#include <stdarg.h>
+#include <cstdarg>
+#include <cerrno>
 #include <unistd.h>
-#include <errno.h>
 
 #include "string.hh"
 #include "alg.hh"
+#include "misc.hh"
 
 
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
 using namespace std;
+using namespace cnrun::stilton;
+
+
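+// C_verprintf::vp() emits a message only when `level' is strictly below
+// the verbose_threshold() supplied by the derived class.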
+void
+C_verprintf::
+vp( int level, const char* fmt, ...) const
+{
+        if ( level < verbose_threshold() ) {
+                va_list ap;
+                va_start (ap, fmt);
+                vprintf( fmt, ap);
+                va_end (ap);
+        }
+}
+
+void
+C_verprintf::
+vp( int level, FILE* f, const char* fmt, ...) const
+{
+        if ( level < verbose_threshold() ) {
+                va_list ap;
+                va_start (ap, fmt);
+                vfprintf( f, fmt, ap);
+                va_end (ap);
+        }
+}
 
 
 string
-cnrun::str::
+cnrun::stilton::str::
 svasprintf( const char* fmt, va_list ap)
 {
         char *_;
@@ -45,7 +71,7 @@ svasprintf( const char* fmt, va_list ap)
 
 
 string
-cnrun::str::
+cnrun::stilton::str::
 sasprintf( const char* fmt, ...)
 {
         char *_;
@@ -63,7 +89,7 @@ sasprintf( const char* fmt, ...)
 
 
 string
-cnrun::str::
+cnrun::stilton::str::
 trim( const string& r0)
 {
         string r (r0);
@@ -78,7 +104,7 @@ trim( const string& r0)
 }
 
 string
-cnrun::str::
+cnrun::stilton::str::
 pad( const string& r0, size_t to)
 {
         string r (to, ' ');
@@ -89,7 +115,7 @@ pad( const string& r0, size_t to)
 
 
 list<string>
-cnrun::str::
+cnrun::stilton::str::
 tokens_trimmed( const string& s_, const char* sep)
 {
         string s {s_};
@@ -104,7 +130,7 @@ tokens_trimmed( const string& s_, const char* sep)
 }
 
 list<string>
-cnrun::str::
+cnrun::stilton::str::
 tokens( const string& s_, const char* sep)
 {
         string s {s_};
@@ -122,7 +148,7 @@ tokens( const string& s_, const char* sep)
 
 
 void
-cnrun::str::
+cnrun::stilton::str::
 decompose_double( double value, double *mantissa, int *exponent)
 {
         char buf[32];
@@ -135,7 +161,7 @@ decompose_double( double value, double *mantissa, int *exponent)
 
 
 string&
-cnrun::str::
+cnrun::stilton::str::
 homedir2tilda( string& inplace)
 {
         const char *home = getenv("HOME");
@@ -146,7 +172,7 @@ homedir2tilda( string& inplace)
 }
 
 string
-cnrun::str::
+cnrun::stilton::str::
 homedir2tilda( const string& v)
 {
         string inplace (v);
@@ -158,7 +184,7 @@ homedir2tilda( const string& v)
 }
 
 string&
-cnrun::str::
+cnrun::stilton::str::
 tilda2homedir( string& inplace)
 {
         const char *home = getenv("HOME");
@@ -171,7 +197,7 @@ tilda2homedir( string& inplace)
 }
 
 string
-cnrun::str::
+cnrun::stilton::str::
 tilda2homedir( const string& v)
 {
         string inplace (v);
@@ -187,10 +213,10 @@ tilda2homedir( const string& v)
 
 
 string
-cnrun::str::
+cnrun::stilton::str::
 dhms( double seconds, int dd)
 {
-        bool        positive = seconds >= 0.;
+        bool    positive = seconds >= 0.;
         if ( not positive )
                 seconds = -seconds;
 
@@ -200,8 +226,8 @@ dhms( double seconds, int dd)
                 d = (int)seconds/60/60/24 % (60*60*24);
         double  f = seconds - floor(seconds);
 
-        using cnrun::str::sasprintf;
-        string        f_ = ( dd == 0 )
+        using cnrun::stilton::str::sasprintf;
+        string  f_ = ( dd == 0 )
                 ? ""
                 : sasprintf( ".%0*d", dd, (int)(f*pow(10, dd)));
         return ( d > 0 )
@@ -214,7 +240,7 @@ dhms( double seconds, int dd)
 }
 
 string
-cnrun::str::
+cnrun::stilton::str::
 dhms_colon( double seconds, int dd)
 {
         bool    positive = seconds >= 0.;
@@ -227,8 +253,8 @@ dhms_colon( double seconds, int dd)
                 d = (int)seconds/60/60/24 % (60*60*24);
         double        f = seconds - floor(seconds);
 
-        using cnrun::str::sasprintf;
-        string        f_ = ( dd == 0 )
+        using cnrun::stilton::str::sasprintf;
+        string  f_ = ( dd == 0 )
                 ? ""
                 : sasprintf( ".%0*d", dd, (int)(f*pow(10, dd)));
 
@@ -239,7 +265,7 @@ dhms_colon( double seconds, int dd)
 
 
 
-inline namespace {
+namespace {
 int
 n_frac_digits( double v)
 {
@@ -254,47 +280,49 @@ n_frac_digits( double v)
 }
 
 string
-cnrun::str::
-double_dot_aligned_s( double val, int int_width, int frac_width)
+cnrun::stilton::str::
+double_dot_aligned_s( double val, const size_t int_width, const size_t frac_width)
 {
-        char buf[40];
-
-        if ( int_width + frac_width > 39 )
-                int_width = 8, frac_width = 8;
+        string ret;
 
-        val = round(val * pow(10.,frac_width)) / pow(10.,frac_width);
+        val = round(val * pow(10., frac_width)) / pow(10., frac_width);
 
         double  intval;
         double  fracval = modf( val, &intval);
         int     frac_digits = n_frac_digits( val);
         int     frac_pad = frac_width - frac_digits;
-        if ( frac_pad < 1 )
-                frac_pad = 1;
+        if ( frac_pad < 1 ) {
+                frac_digits = frac_width;
+                frac_pad = 0;
+        }
 
         if ( frac_digits )
                 if ( (int)intval )
-                        snprintf( buf, 40, "% *d.%0*ld%*s",
-                                  int_width, int(intval),
-                                  frac_digits, (long)round(pow(10., frac_digits) * fabs( fracval)),
-                                  frac_pad, " ");
+                        ret = sasprintf(
+                                "% *d.%0*ld%*s",
+                                int(int_width), int(intval),
+                                frac_digits, (long)round(pow(10., frac_digits) * fabs( fracval)),
+                                frac_pad, " ");
                 else
-                        snprintf( buf, 40, "%*s.%0*ld%*s",
-                                  int_width, " ",
-                                  frac_digits, (long)round(pow(10., frac_digits) * fabs( fracval)),
-                                  frac_pad, " ");
+                        ret = sasprintf(
+                                "%*s.%0*ld%*s",
+                                int(int_width), " ",
+                                frac_digits, (long)round(pow(10., frac_digits) * fabs( fracval)),
+                                frac_pad, " ");
 
         else
                 if ( (int)intval )
-                        snprintf( buf, 40, "%*d.%-*s",
-                                  int_width, int(intval), frac_width, " ");
+                        ret = sasprintf(
+                                "%*d.%-*s",
+                                int(int_width), int(intval), int(frac_width), " ");
                 else
-                        snprintf( buf, 40, "%-*s0%-*s",
-                                  int_width, " ", frac_width, " ");
+                        ret = sasprintf(
+                                "%-*s0%-*s",
+                                int(int_width), " ", int(frac_width), " ");
 
-        return {buf};
+        return ret;
 }
 
-
 // Local Variables:
 // Mode: c++
 // indent-tabs-mode: nil
diff --git a/upstream/src/libstilton/misc.hh b/upstream/src/libstilton/misc.hh
new file mode 100644
index 0000000..bd74ac4
--- /dev/null
+++ b/upstream/src/libstilton/misc.hh
@@ -0,0 +1,43 @@
+/*
+ *       File name:  libstilton/misc.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2014-09-19
+ *
+ *         Purpose:  misc supporting algorithms
+ *
+ *         License:  GPL
+ */
+
+#ifndef CNRUN_LIBSTILTON_MISC_H_
+#define CNRUN_LIBSTILTON_MISC_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <unistd.h>
+
+using namespace std;
+
+namespace cnrun {
+namespace stilton {
+
+struct C_verprintf {
+        virtual int verbose_threshold() const = 0;
+        void vp( int, const char* fmt, ...) const __attribute__ ((format (printf, 3, 4)));
+        void vp( int, FILE*, const char* fmt, ...) const __attribute__ ((format (printf, 4, 5)));
+};
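+
+// NB: in the format attributes above, the implicit `this' counts as
+// argument 1, hence the format string is argument 3 (4 in the FILE*
+// overload).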
+
+
+} // namespace stilton
+} // namespace cnrun
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libstilton/string.hh b/upstream/src/libstilton/string.hh
index 38e6bca..95032a8 100644
--- a/upstream/src/libstilton/string.hh
+++ b/upstream/src/libstilton/string.hh
@@ -9,8 +9,12 @@
  *         License:  GPL
  */
 
-#ifndef _CNRUN_LIBSTILTON_STRING_H
-#define _CNRUN_LIBSTILTON_STRING_H
+#ifndef CNRUN_LIBSTILTON_STRING_H_
+#define CNRUN_LIBSTILTON_STRING_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
 
 #include <cstdarg>
 #include <cstring>
@@ -18,13 +22,10 @@
 #include <list>
 #include <sstream>
 
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
 using namespace std;
 
 namespace cnrun {
+namespace stilton {
 namespace str {
 
 enum class TStrCmpCaseOption {
@@ -39,7 +40,7 @@ string pad( const string& r0, size_t to);
 
 template <typename C>
 string
-join( const C& l, const char* sep)
+join( const C& l, const char* sep = ", ")
 {
         if ( l.empty() )
                 return "";
@@ -102,11 +103,12 @@ string dhms_colon( double seconds, int decimal_digits = 0) __attribute__ ((pure)
 
 
 string
-double_dot_aligned_s( double val, int int_width = 8, int frac_width = 8);
+double_dot_aligned_s( double val, size_t int_width = 8, size_t frac_width = 8);
 
 
 }
 }
+}
 
 #endif
 
diff --git a/upstream/src/cnrun/.gitignore b/upstream/src/lua-cnrun/.gitignore
similarity index 100%
rename from upstream/src/cnrun/.gitignore
rename to upstream/src/lua-cnrun/.gitignore
diff --git a/upstream/src/lua-cnrun/Makefile.am b/upstream/src/lua-cnrun/Makefile.am
new file mode 100644
index 0000000..eca1f01
--- /dev/null
+++ b/upstream/src/lua-cnrun/Makefile.am
@@ -0,0 +1,25 @@
+include $(top_srcdir)/src/Common.mk
+AM_CXXFLAGS += $(LUA_INCLUDE)
+
+pkglib_LTLIBRARIES = \
+	libcnrun-lua.la
+libcnrun_lua_la_SOURCES = \
+	commands.cc cnhost.hh
+libcnrun_lua_la_LIBADD = \
+	../libcnrun/libcnrun.la \
+	$(LIBCN_LIBS) \
+	$(LUA_LIB)
+libcnrun_lua_la_LDFLAGS = \
+	-shared -avoid-version \
+	-module
+
+# and move it to where lua can find it via reqiure("cnrun");
+# no, link is better, so that lintian finds libcnrun-lua.so in its
+# usual location in /usr/lib
+install-exec-hook:
+	$(MKDIR_P) $(DESTDIR)/$(luaexecdir)
+	mv $(DESTDIR)/$(libdir)/$(PACKAGE)/libcnrun-lua.so \
+		$(DESTDIR)/$(luaexecdir)/cnrun.so
+	rm -rf $(DESTDIR)/$(libdir)/$(PACKAGE)
+uninstall-hook:
+	rm -f $(DESTDIR)/$(luaexecdir)/cnrun.so
diff --git a/upstream/src/lua-cnrun/cnhost.hh b/upstream/src/lua-cnrun/cnhost.hh
new file mode 100644
index 0000000..7ac327b
--- /dev/null
+++ b/upstream/src/lua-cnrun/cnhost.hh
@@ -0,0 +1,154 @@
+/*
+ *       File name:  lua-cnrun/cnhost.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2008-11-04
+ *
+ *         Purpose:  C host side for cn lua library
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_CNRUN_CNHOST_H_
+#define CNRUN_CNRUN_CNHOST_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <list>
+#include <string>
+extern "C" {
+#include <lua.h>
+}
+
+#include "libcnrun/model.hh"
+
+namespace cnrun {
+
+struct SHostOptions
+  : public cnrun::SModelOptions {
+        string  working_dir;
+
+        SHostOptions ()
+              : working_dir (".")
+                {}
+        SHostOptions (const SHostOptions& rv)
+              : cnrun::SModelOptions (rv),
+                working_dir (rv.working_dir)
+                {}
+};
+
+
+class CHost
+  : public cnrun::stilton::C_verprintf {
+
+        DELETE_DEFAULT_METHODS (CHost)
+
+    public:
+        CHost (const SHostOptions& rv)
+              : options (rv)
+                {}
+        virtual ~CHost ()
+                {
+                        for ( auto& m : models )
+                                delete m.second;
+                }
+
+        SHostOptions
+                options;
+
+        bool have_model( const string& name) const
+                {
+                        return models.find(name) != models.end();
+                }
+        list<const char*> list_models() const
+                {
+                        list<const char*> L;
+                        for ( auto& x : models )
+                                L.push_back(x.first.c_str());
+                        return L;
+                }
+        CModel* get_model( const string& name)
+                {
+                        return models.at(name);
+                }
+        bool new_model( CModel& M)
+                {
+                        if ( models.find(M.name) == models.end() ) {
+                                models[M.name] = &M;
+                                return 0;
+                        } else
+                                return -1;
+                }
+        void del_model( const string& name)
+                {
+                        if ( models.find(name) != models.end() )
+                                delete models[name];
+                        models.erase( name);
+                }
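+        // NB: CHost owns registered models: new_model() takes ownership
+        // of M, and del_model()/~CHost() delete the instances.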
+        // cmd_new_model( const TArgs&);
+        // cmd_delete_model( const TArgs&);
+        // cmd_import_nml( const TArgs&);
+        // cmd_export_nml( const TArgs&);
+        // cmd_reset_model( const TArgs&);
+        // cmd_cull_deaf_synapses( const TArgs&);
+        // cmd_describe_model( const TArgs&);
+        // cmd_get_model_parameter( const TArgs&);
+        // cmd_set_model_parameter( const TArgs&);
+        // cmd_advance( const TArgs&);
+        // cmd_advance_until( const TArgs&);
+
+        // cmd_new_neuron( const TArgs&);
+        // cmd_new_synapse( const TArgs&);
+        // cmd_get_unit_properties( const TArgs&);
+        // cmd_get_unit_parameter( const TArgs&);
+        // cmd_set_unit_parameter( const TArgs&);
+        // cmd_get_unit_vars( const TArgs&);
+        // cmd_reset_unit( const TArgs&);
+
+        // cmd_get_units_matching( const TArgs&);
+        // cmd_get_units_of_type( const TArgs&);
+        // cmd_set_matching_neuron_parameter( const TArgs&);
+        // cmd_set_matching_synapse_parameter( const TArgs&);
+        // cmd_revert_matching_unit_parameters( const TArgs&);
+        // cmd_decimate( const TArgs&);
+        // cmd_putout( const TArgs&);
+
+        // cmd_new_tape_source( const TArgs&);
+        // cmd_new_periodic_source( const TArgs&);
+        // cmd_new_noise_source( const TArgs&);
+        // cmd_get_sources( const TArgs&);
+        // cmd_connect_source( const TArgs&);
+        // cmd_disconnect_source( const TArgs&);
+
+        // cmd_start_listen( const TArgs&);
+        // cmd_stop_listen( const TArgs&);
+        // cmd_start_log_spikes( const TArgs&);
+        // cmd_stop_log_spikes( const TArgs&);
+
+      // vp
+        int verbose_threshold() const
+                {  return options.verbosely;  }
+    private:
+        map<string, CModel*>
+                models;
+
+        // enum class TIssueType { warning, syntax_error, system_error };
+        // static const char* _issue_type_s(TIssueType);
+        // void _report_script_issue( TIssueType, const char* fmt, ...) const
+        //         __attribute__ ((format (printf, 3, 4)));
+    public:
+        static list<string> list_commands();
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/lua-cnrun/commands.cc b/upstream/src/lua-cnrun/commands.cc
new file mode 100644
index 0000000..c29edac
--- /dev/null
+++ b/upstream/src/lua-cnrun/commands.cc
@@ -0,0 +1,1039 @@
+/*
+ *       File name:  lua-cnrun/commands.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ *                   building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ * Initial version:  2014-10-09
+ *
+ *         Purpose:  libcn and some host-side state, exported for use in your lua code.
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+extern "C" {
+#include <lua.h>
+#include <lualib.h>
+#include <lauxlib.h>
+}
+
+#include "libstilton/string.hh"
+#include "cnhost.hh"
+
+using namespace std;
+using namespace cnrun;
+
+namespace {
+
+// supporting functions:
+
+const int TWO_ARGS_FOR_ERROR = 2;
+
+int make_error( lua_State *L, const char *fmt, ...)  __attribute__ ((format (printf, 2, 3)));
+int make_error( lua_State *L, const char *fmt, ...)
+{
+        va_list ap;
+        va_start (ap, fmt);
+        auto s = stilton::str::svasprintf( fmt, ap);
+        va_end (ap);
+
+        return  lua_pushnil(L),
+                lua_pushstring(L, s.c_str()),
+                TWO_ARGS_FOR_ERROR;
+}
+
+int check_signature( lua_State* L, const char* fun, const char* sig)
+{
+        size_t  siglen = strlen(sig),
+                nargsin = lua_gettop( L);
+        if ( nargsin != siglen )
+                return make_error(
+                        L, "%s: Expected %zu arg(s), got %zu",
+                        fun, siglen, nargsin);
+
+        for ( size_t i = 1; i <= siglen; ++i )
+                switch ( sig[i-1] ) {
+                case 's':
+                        if ( !lua_isstring( L, i) )
+                                return make_error(
+                                        L, "%s(\"%s\"): Expected a string arg at position %zu",
+                                        fun, sig, i);
+                    break;
+                case 'g':
+                case 'd':
+                        if ( !lua_isnumber( L, i) )
+                                return make_error(
+                                        L, "%s(\"%s\"): Expected a numeric arg at position %zu",
+                                        fun, sig, i);
+                    break;
+                case 'p':
+                        if ( !lua_islightuserdata( L, i) )
+                                return make_error(
+                                        L, "%s(\"%s\"): Expected a light user data arg at position %zu",
+                                        fun, sig, i);
+                    break;
+                case 'b':
+                        if ( !lua_isboolean( L, i) )
+                                return make_error(
+                                        L, "%s(\"%s\"): Expected a boolean arg at position %zu",
+                                        fun, sig, i);
+                    break;
+                }
+        return 0;
+}
+
+}
+
+
+// here be the commands:
+namespace {
+
+#define INTRO_CHECK_SIG(sig)                            \
+        if ( check_signature( L, __FUNCTION__, sig) )   \
+                return TWO_ARGS_FOR_ERROR;
+
+// the only command not requiring or dealing with context:
+int dump_available_units( lua_State *L)
+{
+        INTRO_CHECK_SIG("");
+        cnmodel_dump_available_units();
+        return  lua_pushinteger( L, 1),
+                lua_pushstring( L, "fafa"),
+                2;
+}
+
+
+int get_context( lua_State *L)
+{
+        INTRO_CHECK_SIG("");
+
+        auto Cp = new CHost (SHostOptions ());
+
+        return  lua_pushinteger( L, 1),
+                lua_pushlightuserdata( L, Cp),
+                2;
+}
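+
+// A minimal Lua session sketch, assuming these C functions are registered
+// under like names by the module's luaopen table (not shown in this file):
+//   local cn = require("cnrun")     -- loaded as arranged in Makefile.am
+//   local ok, ctx = cn.get_context()
+//   local ok, mdl = cn.new_model( ctx, "m1")
+//   cn.drop_context( ctx)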
+
+#define INTRO_WITH_CONTEXT(sig) \
+        INTRO_CHECK_SIG(sig) \
+        auto& C = *(CHost*)lua_topointer( L, 1);
+
+int drop_context( lua_State *L)
+{
+        INTRO_WITH_CONTEXT("p");
+
+        delete &C;  // come what may
+
+        return  lua_pushinteger( L, 1),
+                lua_pushstring( L, "fafa"),
+                2;
+}
+
+
+#define INTRO_WITH_MODEL_NAME(sig) \
+        INTRO_WITH_CONTEXT(sig) \
+        const char* model_name = lua_tostring( L, 2);
+
+#define VOID_RETURN \
+        return  lua_pushinteger( L, 1), \
+                lua_pushstring( L, model_name), \
+                2;
+
+#define NUMVAL_RETURN(v) \
+        return  lua_pushinteger( L, 1), \
+                lua_pushnumber( L, v), \
+                2;
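+
+// All commands follow one convention: on success, push (1, payload) and
+// return 2; on failure, make_error() pushes (nil, message) instead.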
+
+int new_model( lua_State *L)
+{
+        INTRO_WITH_MODEL_NAME("ps");
+
+        if ( C.have_model( model_name) )
+                return make_error(
+                        L, "%s(): Model named %s already exists",
+                        __FUNCTION__, model_name);
+
+        auto M = new CModel(
+                model_name,
+                new CIntegrateRK65(
+                        C.options.integration_dt_min,
+                        C.options.integration_dt_max,
+                        C.options.integration_dt_cap),
+                C.options);
+        if ( !M )
+                return make_error(
+                        L, "%s(): Failed to create a model (%s)",
+                        __FUNCTION__, model_name);
+
+        C.new_model(*M);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushlightuserdata( L, M),
+                2;
+}
+
+
+int delete_model( lua_State *L)
+{
+        INTRO_WITH_MODEL_NAME("ps");
+
+        C.del_model( model_name);
+
+        VOID_RETURN;
+}
+
+
+int list_models( lua_State *L)
+{
+        INTRO_WITH_CONTEXT("p");
+
+        lua_pushinteger( L, 1);
+        auto MM = C.list_models();
+        for ( auto& M : MM )
+                lua_pushstring( L, M);
+        return MM.size() + 1;  // status plus one string per model
+}
+
+
+#define INTRO_WITH_MODEL(sig) \
+        INTRO_WITH_MODEL_NAME(sig) \
+        if ( not C.have_model( model_name) ) \
+                return make_error( \
+                        L, "%s(): No model named %s", \
+                        __FUNCTION__, model_name); \
+        auto& M = *C.get_model(model_name);
+
+int import_nml( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char* fname = lua_tostring( L, 3);
+        string fname2 = stilton::str::tilda2homedir(fname);
+
+        int ret = M.import_NetworkML( fname2, CModel::TNMLImportOption::merge);
+        if ( ret < 0 )
+                return make_error(
+                        L, "%s(%s): NML import failed from \"%s\" (%d)",
+                        __FUNCTION__, model_name, fname, ret);
+
+        M.cull_blind_synapses();
+
+        VOID_RETURN;
+}
+
+
+int export_nml( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char* fname = lua_tostring( L, 3);
+        string fname2 = stilton::str::tilda2homedir(fname);
+
+        if ( M.export_NetworkML( fname2) )
+                return make_error(
+                        L, "%s(%s): NML export failed to \"%s\"",
+                        __FUNCTION__, model_name, fname);
+
+        VOID_RETURN;
+}
+
+
+int reset_model( lua_State *L)
+{
+        INTRO_WITH_MODEL("ps");
+
+        M.reset( CModel::TResetOption::no_params);
+        // for with_params, there is revert_unit_parameters()
+
+        VOID_RETURN;
+}
+
+
+int cull_deaf_synapses( lua_State *L)
+{
+        INTRO_WITH_MODEL("ps");
+
+        M.cull_deaf_synapses();
+
+        VOID_RETURN;
+}
+
+
+int describe_model( lua_State *L)
+{
+        INTRO_WITH_MODEL("ps");
+
+        M.dump_metrics();
+        M.dump_units();
+        M.dump_state();
+
+        VOID_RETURN;
+}
+
+
+int get_model_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const string
+                P (lua_tostring( L, 3));
+
+        double g = NAN;
+        string s;
+        if ( P == "verbosely" ) {
+                g = M.options.verbosely;
+        } else if ( P == "integration_dt_min" ) {
+                g = M.dt_min();
+        } else if ( P == "integration_dt_max" ) {
+                g = M.dt_min();
+        } else if ( P == "integration_dt_cap" ) {
+                g = M.dt_min();
+        } else if ( P == "listen_dt" ) {
+                g = M.options.listen_dt;
+        } else if ( P == "listen_mode" ) {
+                auto F = [] (bool v) -> char { return v ? '+' : '-'; };
+                s = stilton::str::sasprintf(
+                        "1%cd%cb%c",
+                        F(M.options.listen_1varonly),
+                        F(M.options.listen_deferwrite),
+                        F(M.options.listen_binary));
+        } else if ( P == "sxf_start_delay" ) {
+                g = M.options.sxf_start_delay;
+        } else if ( P == "sxf_period" ) {
+                g = M.options.sxf_period;
+        } else if ( P == "sdf_sigma" ) {
+                g = M.options.sdf_sigma;
+        } else
+                return make_error(
+                        L, "%s(%s): Unrecognized parameter: %s",
+                        __FUNCTION__, model_name, P.c_str());
+
+        return  lua_pushinteger( L, 1),
+                s.empty() ? lua_pushnumber( L, g) : (void)lua_pushstring( L, s.c_str()),
+                2;
+}
+
+
+int set_model_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("psss");
+
+        const char
+                *P = lua_tostring( L, 3),
+                *V = lua_tostring( L, 4);
+
+#define ERR_RETURN \
+        return make_error( \
+                L, "%s(%s): bad value for parameter `%s'", \
+                __FUNCTION__, model_name, P)
+
+        if ( 0 == strcmp( P, "verbosely") ) {
+                int v;
+                if ( 1 != sscanf( V, "%d", &v) )
+                        ERR_RETURN;
+                C.options.verbosely = M.options.verbosely = v;
+
+        } else if ( 0 == strcmp( P, "integration_dt_min") ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                M.set_dt_min( C.options.integration_dt_min = v);
+
+        } else if ( 0 == strcmp( P, "integration_dt_max" ) ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                M.set_dt_max( C.options.integration_dt_max = v);
+
+        } else if ( 0 == strcmp( P, "integration_dt_cap" ) ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                M.set_dt_cap( C.options.integration_dt_cap = v);
+
+        } else if ( 0 == strcmp( P, "listen_dt") ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                C.options.listen_dt = M.options.listen_dt = v;
+
+        } else if ( 0 == strcmp( P, "listen_mode" ) ) {
+                const char *p;
+                if ( (p = strchr( V, '1')) )
+                        M.options.listen_1varonly   = C.options.listen_1varonly   = (*(p+1) != '-');
+                if ( (p = strchr( V, 'd')) )
+                        M.options.listen_deferwrite = C.options.listen_deferwrite = (*(p+1) != '-');
+                if ( (p = strchr( V, 'b')) )
+                        M.options.listen_binary     = C.options.listen_binary     = (*(p+1) != '-');
+                // TODO: spell these listen_mode flags out as separate parameters
+
+        } else if ( 0 == strcmp( P, "sxf_start_delay" ) ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                C.options.sxf_start_delay = M.options.sxf_start_delay = v;
+
+        } else if ( 0 == strcmp( P, "sxf_period" ) ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                C.options.sxf_period = M.options.sxf_period = v;
+
+        } else if ( 0 == strcmp( P, "sdf_sigma" ) ) {
+                double v;
+                if ( 1 != sscanf( V, "%lg", &v) )
+                        ERR_RETURN;
+                C.options.sdf_sigma = M.options.sdf_sigma = v;
+        } else
+                return make_error(
+                        L, "%s(%s): Unrecognized parameter: %s",
+                        __FUNCTION__, model_name, P);
+#undef ERR_RETURN
+
+        VOID_RETURN;
+}
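+
+
+// listen_mode is a flag string: each of '1' (one variable only), 'd'
+// (defer write) and 'b' (binary), when present, is followed by '+' or '-';
+// anything other than '-' after the letter enables the flag, so "1+d-b+"
+// enables listen_1varonly and listen_binary and disables listen_deferwrite.
+// A sketch of the same strchr logic in isolation (illustrative, unused):
+static bool __attribute__((unused))
+listen_flag_enabled( const char *V, char flag)
+{
+        const char *p = strchr( V, flag);
+        return p && *(p+1) != '-';
+}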
+
+
+int advance( lua_State *L)
+{
+        INTRO_WITH_MODEL("psg");
+
+        const double time_to_go = lua_tonumber( L, 3);
+        const double end_time = M.model_time() + time_to_go;
+        if ( M.model_time() > end_time )
+                return make_error(
+                        L, "%s(%s): Cannot go back in time (model is now at %g sec)",
+                        __FUNCTION__, model_name, M.model_time());
+        if ( !M.advance( time_to_go) )
+                return make_error(
+                        L, "%s(%s): Failed to advance",
+                        __FUNCTION__, model_name);
+
+        VOID_RETURN;
+}
+
+
+int advance_until( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const double end_time = lua_tonumber( L, 3);
+        if ( M.model_time() > end_time )
+                return make_error(
+                        L, "%s(%s): Cannot go back in time (model is now at %g sec)",
+                        __FUNCTION__, model_name, M.model_time());
+        if ( !M.advance( end_time - M.model_time()) )
+                return make_error(
+                        L, "%s(%s): Failed to advance",
+                        __FUNCTION__, model_name);
+
+        VOID_RETURN;
+}
+
+
+// ----------------------------------------
+
+int new_neuron( lua_State *L)
+{
+        INTRO_WITH_MODEL("psss");
+
+        const char
+                *type  = lua_tostring( L, 3),
+                *label = lua_tostring( L, 4);
+
+        if ( !M.add_neuron_species(
+                     type, label,
+                     TIncludeOption::is_last) )
+                return make_error(
+                        L, "%s(%s): error", __FUNCTION__, model_name);
+
+        VOID_RETURN;
+}
+
+
+int new_synapse( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssssg");
+
+        const char
+                *type = lua_tostring( L, 3),
+                *src  = lua_tostring( L, 4),
+                *tgt  = lua_tostring( L, 5);
+        const double
+                g = lua_tonumber( L, 6);
+
+        if ( !M.add_synapse_species(
+                     type, src, tgt, g,
+                     CModel::TSynapseCloningOption::yes,
+                     TIncludeOption::is_last) )
+                return make_error(
+                        L, "%s(%s): error", __FUNCTION__, model_name);
+
+        VOID_RETURN;
+}
+
+
+int get_unit_properties( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *label = lua_tostring( L, 3);
+        auto Up = M.unit_by_label(label);
+        if ( Up )
+                return  lua_pushinteger( L, 1),
+                        lua_pushstring( L, Up->label()),
+                        lua_pushstring( L, Up->class_name()),
+                        lua_pushstring( L, Up->family()),
+                        lua_pushstring( L, Up->species()),
+                        lua_pushboolean( L, Up->has_sources()),
+                        lua_pushboolean( L, Up->is_not_altered()),
+                        7;
+        else
+                return make_error(
+                        L, "%s(%s): No such unit: %s",
+                        __FUNCTION__, model_name, label);
+}
+
+
+int get_unit_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("psss");
+
+        const char
+                *label = lua_tostring( L, 3),
+                *param = lua_tostring( L, 4);
+        auto Up = M.unit_by_label(label);
+        if ( !Up )
+                return make_error(
+                        L, "%s(%s): No such unit: %s",
+                        __FUNCTION__, model_name, label);
+        try {
+                return  lua_pushinteger( L, 1),
+                        lua_pushnumber( L, Up->get_param_value( param)),
+                        2;
+        } catch (exception& ex) {
+                return make_error(
+                        L, "%s(%s): Unit %s (type %s) has no parameter named %s",
+                        __FUNCTION__, model_name, label, Up->class_name(), param);
+        }
+}
+
+
+int set_unit_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("psssg");
+
+        const char
+                *label = lua_tostring( L, 3),
+                *param = lua_tostring( L, 4);
+        const double
+                value = lua_tonumber( L, 5);
+        auto Up = M.unit_by_label(label);
+        if ( !Up )
+                return make_error(
+                        L, "%s(%s): No such unit: %s",
+                        __FUNCTION__, model_name, label);
+        try {
+                Up->param_value( param) = value;
+        } catch (exception& ex) {
+                return make_error(
+                        L, "%s(%s): Unit %s (type %s) has no parameter named %s",
+                        __FUNCTION__, model_name, label, Up->class_name(), param);
+        }
+
+        VOID_RETURN;
+}
+
+
+int get_unit_vars( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *label = lua_tostring( L, 3);
+        auto Up = M.unit_by_label(label);
+        if ( !Up )
+                return make_error(
+                        L, "%s(%s): No such unit: %s",
+                        __FUNCTION__, model_name, label);
+
+        lua_pushinteger( L, 1);
+        for ( size_t i = 0; i < Up->v_no(); ++i )
+                lua_pushnumber( L, Up->get_var_value(i));
+        return Up->v_no() + 1;
+}
+
+
+int reset_unit( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *label = lua_tostring( L, 3);
+        auto Up = M.unit_by_label(label);
+        if ( !Up )
+                return make_error(
+                        L, "%s(%s): No such unit: %s",
+                        __FUNCTION__, model_name, label);
+
+        Up -> reset_state();
+
+        VOID_RETURN;
+}
+
+
+// ----------------------------------------
+
+int get_units_matching( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        auto UU = M.list_units( pattern);
+        lua_pushinteger( L, 1);
+        for ( auto& U : UU )
+                lua_pushstring( L, U->label());
+        return UU.size() + 1;
+}
+
+
+int get_units_of_type( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *type = lua_tostring( L, 3);
+        auto UU = M.list_units();
+        int count = 0;
+        lua_pushinteger( L, 1);
+        for ( auto& U : UU )
+                if ( strcmp( U->species(), type) == 0 ) {
+                        lua_pushstring( L, U->label());
+                        ++count;
+                }
+        return count + 1;
+}
+
+
+int set_matching_neuron_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("psssg");
+
+        const char
+                *pattern = lua_tostring( L, 3),
+                *param   = lua_tostring( L, 4);
+        const double
+                value    = lua_tonumber( L, 5);
+        list<CModel::STagGroupNeuronParmSet> tags {
+                CModel::STagGroupNeuronParmSet (pattern, param, value)};
+        int count_set = M.process_paramset_static_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, count_set),
+                2;
+}
+
+
+int set_matching_synapse_parameter( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssssg");
+
+        const char
+                *pat_src = lua_tostring( L, 3),
+                *pat_tgt = lua_tostring( L, 4),
+                *param   = lua_tostring( L, 5);
+        const double
+                value    = lua_tonumber( L, 6);
+
+        list<CModel::STagGroupSynapseParmSet> tags {
+                CModel::STagGroupSynapseParmSet (pat_src, pat_tgt, param, value)};
+        int count_set = M.process_paramset_static_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, count_set),
+                2;
+}
+
+
+int revert_matching_unit_parameters( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+
+        auto UU = M.list_units( pattern);
+        for ( auto& U : UU )
+                U->reset_params();
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, UU.size()),
+                2;
+}
+
+
+int decimate( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssg");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        const double
+                frac     = lua_tonumber( L, 4);
+        if ( frac < 0. || frac > 1. )
+                return make_error(
+                        L, "%s(%s): Decimation fraction (%g) outside [0..1]\n",
+                        __FUNCTION__, model_name, frac);
+
+        list<CModel::STagGroupDecimate> tags {{pattern, frac}};
+        int affected = M.process_decimate_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+int putout( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+
+        list<CModel::STagGroup> tags {{pattern, CModel::STagGroup::TInvertOption::no}};
+        int affected = M.process_putout_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+// ----------------------------------------
+
+int new_tape_source( lua_State *L)
+{
+        INTRO_WITH_MODEL("psssb");
+
+        const char
+                *name  = lua_tostring( L, 3),
+                *fname = lua_tostring( L, 4);
+        const bool
+                looping = lua_toboolean( L, 5);
+
+        if ( M.source_by_id( name) )
+                return make_error(
+                        L, "%s(%s): Tape source \"%s\" already exists",
+                        __FUNCTION__, model_name, name);
+
+        try {
+                // note: operator new throws rather than returning nullptr,
+                // so constructor failures land in the catch below
+                M.add_source(
+                        new CSourceTape(
+                                name, fname,
+                                looping ? TSourceLoopingOption::yes : TSourceLoopingOption::no));
+        } catch (exception& ex) {
+                return make_error(
+                        L, "%s(%s): %s, %s: %s",
+                        __FUNCTION__, model_name, name, fname, ex.what());
+        }
+
+        VOID_RETURN;
+}
+
+
+int new_periodic_source( lua_State *L)
+{
+        INTRO_WITH_MODEL("psssbg");
+
+        const char
+                *name   = lua_tostring( L, 3),
+                *fname  = lua_tostring( L, 4);
+        const bool
+                looping = lua_toboolean( L, 5);
+        const double
+                period  = lua_tonumber( L, 6);
+
+        if ( M.source_by_id( name) )
+                return make_error(
+                        L, "%s(%s): Periodic source \"%s\" already exists",
+                        __FUNCTION__, model_name, name);
+
+        try {
+                // as in new_tape_source: new throws rather than returning nullptr
+                M.add_source(
+                        new CSourcePeriodic(
+                                name, fname,
+                                looping ? TSourceLoopingOption::yes : TSourceLoopingOption::no,
+                                period));
+        } catch (exception& ex) {
+                return make_error(
+                        L, "%s(%s): %s, %s: %s",
+                        __FUNCTION__, model_name, name, fname, ex.what());
+        }
+
+        VOID_RETURN;
+}
+
+
+int new_noise_source( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssgggs");
+
+        const char
+                *name   = lua_tostring( L, 3);
+        const double
+                min    = lua_tonumber( L, 4),
+                max    = lua_tonumber( L, 5),
+                sigma  = lua_tonumber( L, 6);
+        const string
+                distribution (lua_tostring( L, 7));
+
+        if ( M.source_by_id( name) )
+                return make_error(
+                        L, "%s(%s): Noise source \"%s\" already exists",
+                        __FUNCTION__, model_name, name);
+
+        try {
+                M.add_source(
+                        new CSourceNoise(
+                                name, min, max, sigma,
+                                CSourceNoise::distribution_by_name( distribution)));
+        } catch (exception& ex) {
+                return make_error(
+                        L, "%s(%s): %s: %s",
+                        __FUNCTION__, model_name, name, ex.what());
+        }
+
+        VOID_RETURN;
+}
+
+
+int get_sources( lua_State *L)
+{
+        INTRO_WITH_MODEL("ps");
+
+        lua_pushinteger( L, 1);
+        for ( auto& S : M.sources() )
+                lua_pushstring( L, S->name());
+        return M.sources().size() + 1;
+}
+
+
+int connect_source( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssss");
+
+        const char
+                *label  = lua_tostring( L, 3),
+                *parm   = lua_tostring( L, 4),
+                *source = lua_tostring( L, 5);
+        C_BaseSource *S = M.source_by_id( source);
+        if ( !S )
+                return make_error(
+                        L, "%s(%s): No such stimulation source: %s",
+                        __FUNCTION__, model_name, source);
+        // cannot check whether units matching label indeed have a parameter so named
+        list<CModel::STagGroupSource> tags {
+                {label, parm, S, CModel::STagGroup::TInvertOption::no}};
+        int affected = M.process_paramset_source_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+int disconnect_source( lua_State *L)
+{
+        INTRO_WITH_MODEL("pssss");
+
+        const char
+                *label  = lua_tostring( L, 3),
+                *parm   = lua_tostring( L, 4),
+                *source = lua_tostring( L, 5);
+        C_BaseSource *S = M.source_by_id( source);
+        if ( !S )
+                return make_error(
+                        L, "%s(%s): No such stimulation source: %s",
+                        __FUNCTION__, model_name, source);
+        // cannot check whether units matching label indeed have a parameter so named
+        list<CModel::STagGroupSource> tags {
+                {label, parm, S, CModel::STagGroup::TInvertOption::yes}};
+        int affected = M.process_paramset_source_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+// ----------------------------------------
+
+int start_listen( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        list<CModel::STagGroupListener> tags {
+                CModel::STagGroupListener (
+                        pattern, (0
+                                  | (M.options.listen_1varonly ? CN_ULISTENING_1VARONLY : 0)
+                                  | (M.options.listen_deferwrite ? CN_ULISTENING_DEFERWRITE : 0)
+                                  | (M.options.listen_binary ? CN_ULISTENING_BINARY : CN_ULISTENING_DISK)),
+                        CModel::STagGroup::TInvertOption::no)};
+        int affected = M.process_listener_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+int stop_listen( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        list<CModel::STagGroupListener> tags {
+                CModel::STagGroupListener (
+                        pattern, (0
+                                  | (M.options.listen_1varonly ? CN_ULISTENING_1VARONLY : 0)
+                                  | (M.options.listen_deferwrite ? CN_ULISTENING_DEFERWRITE : 0)
+                                  | (M.options.listen_binary ? CN_ULISTENING_BINARY : CN_ULISTENING_DISK)),
+                        CModel::STagGroup::TInvertOption::yes)};
+        int affected = M.process_listener_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+int start_log_spikes( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        list<CModel::STagGroupSpikelogger> tags {{
+                        pattern,
+                        M.options.sxf_period, M.options.sdf_sigma, M.options.sxf_start_delay,
+                        CModel::STagGroup::TInvertOption::no}};
+        int affected = M.process_spikelogger_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
+
+
+int stop_log_spikes( lua_State *L)
+{
+        INTRO_WITH_MODEL("pss");
+
+        const char
+                *pattern = lua_tostring( L, 3);
+        list<CModel::STagGroupSpikelogger> tags {{
+                        pattern,
+                        M.options.sxf_period, M.options.sdf_sigma, M.options.sxf_start_delay,
+                        CModel::STagGroup::TInvertOption::yes}};
+        int affected = M.process_spikelogger_tags( tags);
+
+        return  lua_pushinteger( L, 1),
+                lua_pushinteger( L, affected),
+                2;
+}
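+
+
+// start_listen and stop_listen differ only in the TInvertOption passed to
+// process_listener_tags; both assemble the same flag word.  A sketch of
+// that composition in isolation (constants come from the libcnrun headers;
+// the helper is illustrative and unused):
+static int __attribute__((unused))
+listener_flags( bool onevar, bool defer, bool binary)
+{
+        return (onevar ? CN_ULISTENING_1VARONLY   : 0)
+             | (defer  ? CN_ULISTENING_DEFERWRITE : 0)
+             // binary output replaces, rather than adds to, plain disk output
+             | (binary ? CN_ULISTENING_BINARY : CN_ULISTENING_DISK);
+}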
+
+
+// all together now:
+const struct luaL_Reg cnlib [] = {
+#define BLOOP(X) {#X, X}
+        BLOOP(dump_available_units),
+        BLOOP(get_context),
+        BLOOP(drop_context),
+        BLOOP(new_model),
+        BLOOP(delete_model),
+        BLOOP(list_models),
+        BLOOP(import_nml),
+        BLOOP(export_nml),
+        BLOOP(reset_model),
+        BLOOP(cull_deaf_synapses),
+        BLOOP(describe_model),
+        BLOOP(get_model_parameter),
+        BLOOP(set_model_parameter),
+        BLOOP(advance),
+        BLOOP(advance_until),
+
+        BLOOP(new_neuron),
+        BLOOP(new_synapse),
+        BLOOP(get_unit_properties),
+        BLOOP(get_unit_parameter),
+        BLOOP(set_unit_parameter),
+        BLOOP(get_unit_vars),
+        BLOOP(reset_unit),
+
+        BLOOP(get_units_matching),
+        BLOOP(get_units_of_type),
+        BLOOP(set_matching_neuron_parameter),
+        BLOOP(set_matching_synapse_parameter),
+        BLOOP(revert_matching_unit_parameters),
+        BLOOP(decimate),
+        BLOOP(putout),
+
+        BLOOP(new_tape_source),
+        BLOOP(new_periodic_source),
+        BLOOP(new_noise_source),
+        BLOOP(get_sources),
+        BLOOP(connect_source),
+        BLOOP(disconnect_source),
+
+        BLOOP(start_listen),
+        BLOOP(stop_listen),
+        BLOOP(start_log_spikes),
+        BLOOP(stop_log_spikes),
+#undef BLOOP
+        {NULL, NULL}
+};
+
+}
+
+
+extern "C" {
+
+int luaopen_cnrun( lua_State *L)
+{
+#ifdef HAVE_LUA_51
+        printf( "register cnrun\n");
+        luaL_register(L, "cnlib", cnlib);
+#else  // this must be 5.2
+        printf( "newlib cnrun\n");
+        luaL_newlib(L, cnlib);
+#endif
+        return 1;
+}
+
+}
+
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
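
A minimal host-side sketch for loading the module above, assuming Lua 5.2
(luaL_requiref); under 5.1, the luaL_register branch already leaves a global
"cnlib" table instead. The module name "cnrun" here is illustrative and
depends on how the shared object is installed:

    #include <lua.hpp>

    extern "C" int luaopen_cnrun( lua_State*);

    int main()
    {
            lua_State *L = luaL_newstate();
            luaL_openlibs( L);
            luaL_requiref( L, "cnrun", luaopen_cnrun, 1);  // also sets global "cnrun"
            lua_pop( L, 1);
            // only asserts presence; argument conventions are those checked
            // by INTRO_WITH_MODEL in the functions above
            return luaL_dostring( L, "assert(type(cnrun.list_models) == 'function')");
    }
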
diff --git a/upstream/src/print_version.cc b/upstream/src/print_version.cc
deleted file mode 100644
index aea64cc..0000000
--- a/upstream/src/print_version.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *       File name:  print_version.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer at gmail.com>
- * Initial version:  2014-03-22
- *
- *         Purpose:  print version (separate file for every make to always touch it)
- *
- *         License:  GPL
- */
-
-#include <cstdio>
-#include "config.h"
-
-void
-print_version( const char* this_program)
-{
-        printf(  "%s %s built " __DATE__ " " __TIME__ " by %s\n", this_program, GIT_DESCRIBE_TAGS, BUILT_BY);
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: nil
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/tools/.gitignore b/upstream/src/tools/.gitignore
index a3ee24b..3a6c993 100644
--- a/upstream/src/tools/.gitignore
+++ b/upstream/src/tools/.gitignore
@@ -1,3 +1,2 @@
-varfold
 hh-latency-estimator
 spike2sdf
diff --git a/upstream/src/tools/Makefile.am b/upstream/src/tools/Makefile.am
index d866f51..6fbad79 100644
--- a/upstream/src/tools/Makefile.am
+++ b/upstream/src/tools/Makefile.am
@@ -4,23 +4,15 @@ AM_CXXFLAGS += \
 	$(LIBCN_CFLAGS)
 
 bin_PROGRAMS = \
-	spike2sdf varfold hh-latency-estimator
+	spike2sdf hh-latency-estimator
 
 spike2sdf_SOURCES = \
 	spike2sdf.cc
 
-varfold_SOURCES = \
-	varfold.cc
-varfold_LDFLAAGS = \
-	-shared
-varfold_LDADD = \
-	$(LIBCN_LIBS)
-
 hh_latency_estimator_SOURCES = \
 	hh-latency-estimator.cc
 hh_latency_estimator_LDADD = \
-	../libcn/libcn.la \
-	../libstilton/libstilton.la \
+	../libcnrun/libcnrun.la \
 	$(LIBCN_LIBS)
 hh_latency_estimator_LDFLAAGS = \
 	-shared
diff --git a/upstream/src/tools/hh-latency-estimator.cc b/upstream/src/tools/hh-latency-estimator.cc
index deaea57..b5653b2 100644
--- a/upstream/src/tools/hh-latency-estimator.cc
+++ b/upstream/src/tools/hh-latency-estimator.cc
@@ -1,23 +1,27 @@
 /*
- * Author: Andrei Zavada <johnhommer at gmail.com>
+ *       File name:  tools/hh-latency-estimator.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2009-09-12
  *
- * License: GPL-2+
+ *         Purpose:  convenience tool to estimate the latency to first spike of an HH neuron
+ *                   in response to continuous Poisson stimulation
  *
- * Initial version: 2009-09-12
+ *         License:  GPL-2+
  */
 
-#include <unistd.h>
-
-#include "libcn/hosted-neurons.hh"
-#include "libcn/standalone-synapses.hh"
-
-#include "libcn/model.hh"
-#include "libcn/types.hh"
-
 #if HAVE_CONFIG_H && !defined(VERSION)
 #  include "config.h"
 #endif
 
+#include <unistd.h>
+
+#include "libcnrun/hosted-neurons.hh"
+#include "libcnrun/standalone-synapses.hh"
+
+#include "libcnrun/model.hh"
+#include "libcnrun/types.hh"
+
 using namespace cnrun;
 
 CModel *Model;
@@ -26,37 +30,37 @@ enum TOscillatorType { S_POISSON = 0, S_PULSE = 1 };
 enum TIncrOpType { INCR_ADD, INCR_MULT };
 
 struct SOptions {
-	double	pulse_f_min,
-		pulse_f_max,
-		pulse_df,
-		syn_g,
-		syn_beta,
-		syn_trel;
-	bool	enable_listening;
-
-	size_t	n_repeats;
-	const char
-		*irreg_mag_fname,
-		*irreg_cnt_fname;
-
-	TOscillatorType
-		src_type;
-	TIncrOpType
-		incr_op;
-
-	SOptions()
-	       : pulse_f_min (-INFINITY),
-		 pulse_f_max (-INFINITY),
-		 pulse_df (-INFINITY),
-		 syn_g (-INFINITY),
-		 syn_beta (-INFINITY),
-		 syn_trel (-INFINITY),
-		 enable_listening (false),
-		 n_repeats (1),
-		 irreg_mag_fname (nullptr),
-		 irreg_cnt_fname (nullptr),
-		 src_type (S_POISSON)
-		{}
+        double  pulse_f_min,
+                pulse_f_max,
+                pulse_df,
+                syn_g,
+                syn_beta,
+                syn_trel;
+        bool    enable_listening;
+
+        size_t  n_repeats;
+        const char
+                *irreg_mag_fname,
+                *irreg_cnt_fname;
+
+        TOscillatorType
+                src_type;
+        TIncrOpType
+                incr_op;
+
+        SOptions()
+               : pulse_f_min (-INFINITY),
+                 pulse_f_max (-INFINITY),
+                 pulse_df (-INFINITY),
+                 syn_g (-INFINITY),
+                 syn_beta (-INFINITY),
+                 syn_trel (-INFINITY),
+                 enable_listening (false),
+                 n_repeats (1),
+                 irreg_mag_fname (nullptr),
+                 irreg_cnt_fname (nullptr),
+                 src_type (S_POISSON)
+                {}
 };
 
 SOptions Options;
@@ -66,162 +70,167 @@ const char* const pulse_parm_sel[] = { "lambda", "f" };
 
 
 static int parse_options( int argc, char **argv);
-#define CNRUN_CLPARSE_HELP_REQUEST	-1
-#define CNRUN_CLPARSE_ERROR		-2
+#define CNRUN_CLPARSE_HELP_REQUEST  -1
+#define CNRUN_CLPARSE_ERROR         -2
 
 static void usage( const char *argv0);
 
 
-#define CNRUN_EARGS		-1
-#define CNRUN_ESETUP		-2
-#define CNRUN_ETRIALFAIL	-3
+#define CNRUN_EARGS       -1
+#define CNRUN_ESETUP      -2
+#define CNRUN_ETRIALFAIL  -3
 
 int
 main( int argc, char *argv[])
 {
-//	cout << "\nHH latency estimator  compiled " << __TIME__ << " " << __DATE__ << endl;
+//        cout << "\nHH latency estimator  compiled " << __TIME__ << " " << __DATE__ << endl;
 
-	if ( argc == 1 ) {
-		usage( argv[0]);
-		return 0;
-	}
+        if ( argc == 1 ) {
+                usage( argv[0]);
+                return 0;
+        }
 
-	int retval = 0;
+        int retval = 0;
 
-	switch ( parse_options( argc, argv) ) {
-	case CNRUN_CLPARSE_ERROR:
-		cerr << "Problem parsing command line or sanitising values\n"
-			"Use -h for help\n";
-		return CNRUN_EARGS;
-	case CNRUN_CLPARSE_HELP_REQUEST:
-		usage( argv[0]);
-		return 0;
-	}
+        switch ( parse_options( argc, argv) ) {
+        case CNRUN_CLPARSE_ERROR:
+                cerr << "Problem parsing command line or sanitising values\n"
+                        "Use -h for help\n";
+                return CNRUN_EARGS;
+        case CNRUN_CLPARSE_HELP_REQUEST:
+                usage( argv[0]);
+                return 0;
+        }
 
       // create and set up the model
-	if ( !(Model = new CModel( "hh-latency", new CIntegrateRK65(), 0)) ) {
-		cerr << "Failed to create a model\n";
-		return CNRUN_ESETUP;
-	}
-
-	Model->verbosely = 0;
+        Model = new CModel(
+                "hh-latency",
+                new CIntegrateRK65(
+                        1e-6, .5, 5, 1e-8,  1e-12, 1e-6, true),
+                SModelOptions ());
+        if ( !Model ) {
+                cerr << "Failed to create a model\n";
+                return CNRUN_ESETUP;
+        }
+
+        Model->options.verbosely = 0;
 
       // add our three units
-	CNeuronHH_d	*hh	 = new CNeuronHH_d( "HH", 0.2, 0.1, 0.3, Model, CN_UOWNED);
-	C_BaseNeuron	*pulse = (Options.src_type == S_PULSE)
-		? static_cast<C_BaseNeuron*>(new CNeuronDotPulse( "Pulse", 0.1, 0.2, 0.3, Model, CN_UOWNED))
-		: static_cast<C_BaseNeuron*>(new COscillatorDotPoisson( "Pulse", 0.1, 0.2, 0.3, Model, CN_UOWNED));
-	CSynapseMxAB_dd	*synapse = new CSynapseMxAB_dd( pulse, hh, Options.syn_g, Model, CN_UOWNED);
+        CNeuronHH_d  *hh    = new CNeuronHH_d( "HH", 0.2, 0.1, 0.3, Model, CN_UOWNED);
+        C_BaseNeuron *pulse = (Options.src_type == S_PULSE)
+                ? static_cast<C_BaseNeuron*>(new CNeuronDotPulse( "Pulse", 0.1, 0.2, 0.3, Model, CN_UOWNED))
+                : static_cast<C_BaseNeuron*>(new COscillatorDotPoisson( "Pulse", 0.1, 0.2, 0.3, Model, CN_UOWNED));
+        CSynapseMxAB_dd *synapse = new CSynapseMxAB_dd( pulse, hh, Options.syn_g, Model, CN_UOWNED);
 
       // enable_spikelogging_service
-	hh -> enable_spikelogging_service();
+        hh -> enable_spikelogging_service();
 
-	if ( Options.enable_listening ) {
-		hh -> start_listening( CN_ULISTENING_DISK | CN_ULISTENING_1VARONLY);
-		pulse -> start_listening( CN_ULISTENING_DISK);
-		synapse -> start_listening( CN_ULISTENING_DISK);
-		Model->listen_dt = 0.;
-	}
+        if ( Options.enable_listening ) {
+                hh -> start_listening( CN_ULISTENING_DISK | CN_ULISTENING_1VARONLY);
+                pulse -> start_listening( CN_ULISTENING_DISK);
+                synapse -> start_listening( CN_ULISTENING_DISK);
+                Model->options.listen_dt = 0.;
+        }
 
       // assign user-supplied values to parameters: invariant ones first
-	if ( Options.syn_beta != -INFINITY )
-		synapse->param_value("beta") = Options.syn_beta;
-	if ( Options.syn_trel != -INFINITY )
-		synapse->param_value("trel") = Options.syn_trel;
+        if ( Options.syn_beta != -INFINITY )
+                synapse->param_value("beta") = Options.syn_beta;
+        if ( Options.syn_trel != -INFINITY )
+                synapse->param_value("trel") = Options.syn_trel;
 
       // do trials
-	size_t	n_spikes;
-	double	warmup_time = 30;
-
-	size_t	i;
-
-	size_t	n_steps = 1 + ((Options.incr_op == INCR_ADD)
-			       ? (Options.pulse_f_max - Options.pulse_f_min) / Options.pulse_df
-			       : log(Options.pulse_f_max / Options.pulse_f_min) / log(Options.pulse_df));
-
-	double	frequencies[n_steps];
-	for ( i = 0; i < n_steps; i++ )
-		frequencies[i] = (Options.incr_op == INCR_ADD)
-			? Options.pulse_f_min + i*Options.pulse_df
-			: Options.pulse_f_min * pow( Options.pulse_df, (double)i);
-	vector<double>
-		irreg_mags[n_steps];
-	size_t	irreg_counts[n_steps];
-	memset( irreg_counts, 0, n_steps*sizeof(size_t));
-
-	double	latencies[n_steps];
-
-	for ( size_t trial = 0; trial < Options.n_repeats; trial++ ) {
-		memset( latencies, 0, n_steps*sizeof(double));
-
-		for ( i = 0; i < n_steps; i++ ) {
-
-			if ( Options.enable_listening ) {
-				char label[CN_MAX_LABEL_SIZE];
-				snprintf( label, CN_MAX_LABEL_SIZE, "pulse-%06g", frequencies[i]);
-				pulse->set_label( label);
-				snprintf( label, CN_MAX_LABEL_SIZE, "hh-%06g", frequencies[i]);
-				hh->set_label( label);
-				snprintf( label, CN_MAX_LABEL_SIZE, "synapse-%06g", frequencies[i]);
-				synapse->set_label( label);
-			}
-			Model->reset();  // will reset model_time, preserve params, and is a generally good thing
-
-			pulse->param_value( pulse_parm_sel[Options.src_type]) = 0;
-
-		      // warmup
-			Model->advance( warmup_time);
-			if ( hh->spikelogger_agent()->spike_history.size() )
-				printf( "What? %zd spikes already?\n", hh->spikelogger_agent()->spike_history.size());
-		      // calm down the integrator
-			Model->dt() = Model->dt_min();
-		      // assign trial values
-			pulse->param_value(pulse_parm_sel[Options.src_type]) = frequencies[i];
-		      // go
-			Model->advance( 100);
-
-		      // collect latency: that is, the time of the first spike
-			latencies[i] = (( n_spikes = hh->spikelogger_agent()->spike_history.size() )
-				   ? *(hh->spikelogger_agent()->spike_history.begin()) - warmup_time
-				   : 999);
-
-			printf( "%g\t%g\t%zu\n", frequencies[i], latencies[i], n_spikes);
-		}
-
-		printf( "\n");
-		for ( i = 1; i < n_steps; i++ )
-			if ( latencies[i] > latencies[i-1] ) {
-				irreg_mags[i].push_back( (latencies[i] - latencies[i-1]) / latencies[i-1]);
-				irreg_counts[i]++;
-			}
-	}
-
-
-	{
-		ostream *irrmag_strm = Options.irreg_mag_fname ? new ofstream( Options.irreg_mag_fname) : &cout;
-
-		(*irrmag_strm) << "#<at>\t<irreg_mag>\n";
-		for ( i = 0; i < n_steps; i++ )
-			if ( irreg_mags[i].size() )
-				for ( size_t j = 0; j < irreg_mags[i].size(); j++ )
-					(*irrmag_strm) << frequencies[i] << '\t' << irreg_mags[i][j] << endl;
-
-		if ( Options.irreg_mag_fname )
-			delete irrmag_strm;
-	}
-	{
-		ostream *irrcnt_strm = Options.irreg_cnt_fname ? new ofstream( Options.irreg_cnt_fname) : &cout;
-
-		(*irrcnt_strm) << "#<at>\t<cnt>\n";
-		for ( i = 0; i < n_steps; i++ )
-			(*irrcnt_strm) << frequencies[i] << '\t' << irreg_counts[i] << endl;
-
-		if ( Options.irreg_cnt_fname )
-			delete irrcnt_strm;
-	}
-	delete Model;
-
-	return retval;
+        size_t  n_spikes;
+        double  warmup_time = 30;
+
+        size_t  i;
+
+        size_t  n_steps = 1 + ((Options.incr_op == INCR_ADD)
+                               ? (Options.pulse_f_max - Options.pulse_f_min) / Options.pulse_df
+                               : log(Options.pulse_f_max / Options.pulse_f_min) / log(Options.pulse_df));
+
+        double  frequencies[n_steps];
+        for ( i = 0; i < n_steps; i++ )
+                frequencies[i] = (Options.incr_op == INCR_ADD)
+                        ? Options.pulse_f_min + i*Options.pulse_df
+                        : Options.pulse_f_min * pow( Options.pulse_df, (double)i);
+        vector<double>
+                irreg_mags[n_steps];
+        size_t  irreg_counts[n_steps];
+        memset( irreg_counts, 0, n_steps*sizeof(size_t));
+
+        double  latencies[n_steps];
+
+        for ( size_t trial = 0; trial < Options.n_repeats; trial++ ) {
+                memset( latencies, 0, n_steps*sizeof(double));
+
+                for ( i = 0; i < n_steps; i++ ) {
+
+                        if ( Options.enable_listening ) {
+                                char label[C_BaseUnit::max_label_size];
+                                snprintf( label, C_BaseUnit::max_label_size, "pulse-%06g", frequencies[i]);
+                                pulse->set_label( label);
+                                snprintf( label, C_BaseUnit::max_label_size, "hh-%06g", frequencies[i]);
+                                hh->set_label( label);
+                                snprintf( label, C_BaseUnit::max_label_size, "synapse-%06g", frequencies[i]);
+                                synapse->set_label( label);
+                        }
+                        Model->reset();  // will reset model_time, preserve params, and is a generally good thing
+
+                        pulse->param_value( pulse_parm_sel[Options.src_type]) = 0;
+
+                      // warmup
+                        Model->advance( warmup_time);
+                        if ( hh->spikelogger_agent()->spike_history.size() )
+                                printf( "What? %zd spikes already?\n", hh->spikelogger_agent()->spike_history.size());
+                      // calm down the integrator
+                        Model->set_dt( Model->dt_min());
+                      // assign trial values
+                        pulse->param_value(pulse_parm_sel[Options.src_type]) = frequencies[i];
+                      // go
+                        Model->advance( 100);
+
+                      // collect latency: that is, the time of the first spike
+                        latencies[i] = (( n_spikes = hh->spikelogger_agent()->spike_history.size() )
+                                   ? *(hh->spikelogger_agent()->spike_history.begin()) - warmup_time
+                                   : 999);
+
+                        printf( "%g\t%g\t%zu\n", frequencies[i], latencies[i], n_spikes);
+                }
+
+                printf( "\n");
+                for ( i = 1; i < n_steps; i++ )
+                        if ( latencies[i] > latencies[i-1] ) {
+                                irreg_mags[i].push_back( (latencies[i] - latencies[i-1]) / latencies[i-1]);
+                                irreg_counts[i]++;
+                        }
+        }
+
+
+        {
+                ostream *irrmag_strm = Options.irreg_mag_fname ? new ofstream( Options.irreg_mag_fname) : &cout;
+
+                (*irrmag_strm) << "#<at>\t<irreg_mag>\n";
+                for ( i = 0; i < n_steps; i++ )
+                        if ( irreg_mags[i].size() )
+                                for ( size_t j = 0; j < irreg_mags[i].size(); j++ )
+                                        (*irrmag_strm) << frequencies[i] << '\t' << irreg_mags[i][j] << endl;
+
+                if ( Options.irreg_mag_fname )
+                        delete irrmag_strm;
+        }
+        {
+                ostream *irrcnt_strm = Options.irreg_cnt_fname ? new ofstream( Options.irreg_cnt_fname) : &cout;
+
+                (*irrcnt_strm) << "#<at>\t<cnt>\n";
+                for ( i = 0; i < n_steps; i++ )
+                        (*irrcnt_strm) << frequencies[i] << '\t' << irreg_counts[i] << endl;
+
+                if ( Options.irreg_cnt_fname )
+                        delete irrcnt_strm;
+        }
+        delete Model;
+
+        return retval;
 }
 
 
@@ -233,24 +242,24 @@ main( int argc, char *argv[])
 static void
 usage( const char *argv0)
 {
-	cout << "Usage: " << argv0 << "-f...|-l...  [-y...]\n" <<
-		"Stimulation intensity to estimate the response latency for:\n"
-		" -f <double f_min>:<double f_incr>:<double f_max>\n"
-		"\t\t\tUse a DotPulse oscillator, with these values for f, or\n"
-		" -l <double f_min>:<double f_incr>:<double f_max>\n"
-		"\t\t\tUse a DotPoisson oscillator, with these values for lambda\n"
-		"Synapse parameters:\n"
-		" -yg <double>\tgsyn (required)\n"
-		" -yb <double>\tbeta\n"
-		" -yr <double>\ttrel\n"
-		"\n"
-		" -o\t\t\tWrite unit variables\n"
-		" -c <uint>\t\tRepeat this many times\n"
-		" -T <fname>\tCollect stats on irreg_cnt to fname\n"
-		" -S <fname>\tCollect stats on irreg_mags to fname\n"
-		"\n"
-		" -h \t\tDisplay this help\n"
-		"\n";
+        cout << "Usage: " << argv0 << "-f...|-l...  [-y...]\n" <<
+                "Stimulation intensity to estimate the response latency for:\n"
+                " -f <double f_min>:<double f_incr>:<double f_max>\n"
+                "\t\t\tUse a DotPulse oscillator, with these values for f, or\n"
+                " -l <double f_min>:<double f_incr>:<double f_max>\n"
+                "\t\t\tUse a DotPoisson oscillator, with these values for lambda\n"
+                "Synapse parameters:\n"
+                " -yg <double>\tgsyn (required)\n"
+                " -yb <double>\tbeta\n"
+                " -yr <double>\ttrel\n"
+                "\n"
+                " -o\t\t\tWrite unit variables\n"
+                " -c <uint>\t\tRepeat this many times\n"
+                " -T <fname>\tCollect stats on irreg_cnt to fname\n"
+                " -S <fname>\tCollect stats on irreg_mags to fname\n"
+                "\n"
+                " -h \t\tDisplay this help\n"
+                "\n";
 }
 
 
@@ -261,72 +270,89 @@ usage( const char *argv0)
 static int
 parse_options( int argc, char **argv)
 {
-	int	c;
-
-	while ( (c = getopt( argc, argv, "f:l:y:oc:S:T:h")) != -1 )
-		switch ( c ) {
-		case 'y':
-			switch ( optarg[0] ) {
-			case 'g':	if ( sscanf( optarg+1, "%lg", &Options.syn_g) != 1 ) {
-					cerr << "-yg takes a double\n";
-					return CNRUN_CLPARSE_ERROR;
-				}						break;
-			case 'b':	if ( sscanf( optarg+1, "%lg", &Options.syn_beta) != 1 ) {
-					cerr << "-yb takes a double\n";
-					return CNRUN_CLPARSE_ERROR;
-				}						break;
-			case 'r':	if ( sscanf( optarg+1, "%lg", &Options.syn_trel) != 1 ) {
-					cerr << "-yr takes a double\n";
-					return CNRUN_CLPARSE_ERROR;
-				}						break;
-			default:	cerr << "Unrecognised option modifier for -y\n";
-				return CNRUN_CLPARSE_ERROR;
-			}
-		    break;
-
-		case 'f':
-		case 'l':
-			if ( (Options.incr_op = INCR_ADD,
-			      (sscanf( optarg, "%lg:%lg:%lg",
-				       &Options.pulse_f_min, &Options.pulse_df, &Options.pulse_f_max) == 3))
-				||
-			     (Options.incr_op = INCR_MULT,
-			      (sscanf( optarg, "%lg*%lg:%lg",
-				       &Options.pulse_f_min, &Options.pulse_df, &Options.pulse_f_max) == 3)) ) {
-
-				Options.src_type = (c == 'f') ? S_PULSE : S_POISSON;
-
-			} else {
-				cerr << "Expecting all three parameter with -{f,l} min{:,*}incr:max\n";
-				return CNRUN_CLPARSE_ERROR;
-			}
-		    break;
-
-		case 'o':	Options.enable_listening = true;		break;
-
-		case 'c':	Options.n_repeats = strtoul( optarg, nullptr, 10);	break;
-
-		case 'S':	Options.irreg_mag_fname = optarg;		break;
-		case 'T':	Options.irreg_cnt_fname = optarg;		break;
-
-		case 'h':
-			return CNRUN_CLPARSE_HELP_REQUEST;
-		case '?':
-		default:
-			return CNRUN_CLPARSE_ERROR;
-		}
-
-	if ( Options.pulse_f_min == -INFINITY ||
-	     Options.pulse_f_max == -INFINITY ||
-	     Options.pulse_df    == -INFINITY ) {
-		cerr << "Oscillator type (with -f or -l) not specified\n";
-		return CNRUN_EARGS;
-	}
-
-	return 0;
+        int             c;
+
+        while ( (c = getopt( argc, argv, "f:l:y:oc:S:T:h")) != -1 )
+                switch ( c ) {
+                case 'y':
+                        switch ( optarg[0] ) {
+                        case 'g':
+                                if ( sscanf( optarg+1, "%lg", &Options.syn_g) != 1 ) {
+                                        cerr << "-yg takes a double\n";
+                                        return CNRUN_CLPARSE_ERROR;
+                                }
+                            break;
+                        case 'b':
+                                if ( sscanf( optarg+1, "%lg", &Options.syn_beta) != 1 ) {
+                                        cerr << "-yb takes a double\n";
+                                        return CNRUN_CLPARSE_ERROR;
+                                }
+                            break;
+                        case 'r':
+                                if ( sscanf( optarg+1, "%lg", &Options.syn_trel) != 1 ) {
+                                        cerr << "-yr takes a double\n";
+                                        return CNRUN_CLPARSE_ERROR;
+                                }
+                            break;
+                        default:
+                                cerr << "Unrecognised option modifier for -y\n";
+                            return CNRUN_CLPARSE_ERROR;
+                        }
+                    break;
+
+                case 'f':
+                case 'l':
+                        if ( (Options.incr_op = INCR_ADD,
+                              (sscanf( optarg, "%lg:%lg:%lg",
+                                       &Options.pulse_f_min, &Options.pulse_df, &Options.pulse_f_max) == 3))
+                                ||
+                             (Options.incr_op = INCR_MULT,
+                              (sscanf( optarg, "%lg*%lg:%lg",
+                                       &Options.pulse_f_min, &Options.pulse_df, &Options.pulse_f_max) == 3)) ) {
+
+                                Options.src_type = (c == 'f') ? S_PULSE : S_POISSON;
+
+                        } else {
+                                cerr << "Expecting all three parameter with -{f,l} min{:,*}incr:max\n";
+                                return CNRUN_CLPARSE_ERROR;
+                        }
+                    break;
+
+                case 'o':
+                        Options.enable_listening = true;
+                    break;
+
+                case 'c':
+                        Options.n_repeats = strtoul( optarg, nullptr, 10);
+                    break;
+
+                case 'S':
+                        Options.irreg_mag_fname = optarg;
+                    break;
+                case 'T':
+                        Options.irreg_cnt_fname = optarg;
+                    break;
+
+                case 'h':
+                        return CNRUN_CLPARSE_HELP_REQUEST;
+                case '?':
+                default:
+                        return CNRUN_CLPARSE_ERROR;
+                }
+
+        if ( Options.pulse_f_min == -INFINITY ||
+             Options.pulse_f_max == -INFINITY ||
+             Options.pulse_df    == -INFINITY ) {
+                cerr << "Oscillator type (with -f or -l) not specified\n";
+                return CNRUN_EARGS;
+        }
+
+        return 0;
 }
 
-
-
-
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
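
The sweep in main() above steps the stimulation frequency either additively
(min:incr:max) or multiplicatively (min*incr:max), and derives the step
count in closed form. A standalone sketch with two worked values; note it
shares the tool's float-to-size_t truncation hazard when the division is
not exact:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    static size_t sweep_steps( double f_min, double f_max, double df, bool multiplicative)
    {
            return 1 + (multiplicative
                        ? log( f_max / f_min) / log( df)   // 10*2:80 -> 10, 20, 40, 80
                        : (f_max - f_min) / df);           // 10:5:30 -> 10, 15, 20, 25, 30
    }

    int main()
    {
            printf( "%zu %zu\n",
                    sweep_steps( 10, 30, 5, false),   // 5
                    sweep_steps( 10, 80, 2, true));   // 4
    }
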
diff --git a/upstream/src/tools/spike2sdf.cc b/upstream/src/tools/spike2sdf.cc
index 91de7f7..a1dafa6 100644
--- a/upstream/src/tools/spike2sdf.cc
+++ b/upstream/src/tools/spike2sdf.cc
@@ -1,13 +1,17 @@
 /*
- * Author: Andrei Zavada <johnhommer at gmail.com>
+ *       File name:  tools/spike2sdf.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer at gmail.com>
+ * Initial version:  2008-11-11
  *
- * License: GPL-2+
+ *         Purpose:  A remedy against forgetting to pass -d to cnrun
  *
- * Initial version: 2008-11-11
- *
- * A remedy against forgetting to pass -d to cnrun
+ *         License:  GPL-2+
  */
 
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
 
 #include <iostream>
 #include <fstream>
@@ -17,75 +21,77 @@
 #include <cstring>
 #include <cmath>
 
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
 using namespace std;
 
 int
 main( int argc, char *argv[])
 {
-	if ( argc != 5 ) {
-		cerr << "Expecting <fname> <period> <sigma> <restrict_window_size\n";
-		return -1;
-	}
-
-	string fname( argv[1]);
-
-	double	sxf_sample = strtod( argv[2], nullptr),
-		sdf_sigma = strtod( argv[3], nullptr),
-		restrict_window = strtod( argv[4], nullptr);
-
-	ifstream is( fname.c_str());
-	if ( !is.good() ) {
-		cerr << "Can't read from file " << fname << endl;
-		return -1;
-	}
-	is.ignore( numeric_limits<streamsize>::max(), '\n');
-
-	if ( fname.rfind( ".spikes") == fname.size() - 7 )
-		fname.erase( fname.size() - 7, fname.size());
-	fname += ".sdf";
-
-	ofstream os( fname.c_str());
-	if ( !os.good() ) {
-		cerr << "Can't open " << fname << " for writing\n";
-		return -1;
-	}
-	os << "#<t>\t<sdf>\t<nspikes>\n";
-
-
-	vector<double> _spike_history;
-	while ( true ) {
-		double datum;
-		is >> datum;
-		if ( is.eof() )
-			break;
-		_spike_history.push_back( datum);
-	}
-
-	double	at, len = _spike_history.back(), dt,
-		sdf_var = sdf_sigma * sdf_sigma;
-	cout << fname << ": " << _spike_history.size() << " spikes (last at " << _spike_history.back() << ")\n";
-	for ( at = sxf_sample; at < len; at += sxf_sample ) {
-		double result = 0.;
-		unsigned nspikes = 0;
-		for ( auto &T : _spike_history ) {
-			dt = T - at;
-			if ( restrict_window > 0 && dt < -restrict_window/2 )
-				continue;
-			if ( restrict_window > 0 && dt >  restrict_window/2 )
-				break;
-
-			nspikes++;
-			result += exp( -dt*dt/sdf_var);
-
-		}
-		os << at << "\t" << result << "\t" << nspikes << endl;
-	}
-
-	return 0;
+        if ( argc != 5 ) {
+                cerr << "Expecting <fname> <period> <sigma> <restrict_window_size\n";
+                return -1;
+        }
+
+        string fname( argv[1]);
+
+        double  sxf_sample = strtod( argv[2], nullptr),
+                sdf_sigma = strtod( argv[3], nullptr),
+                restrict_window = strtod( argv[4], nullptr);
+
+        ifstream is( fname.c_str());
+        if ( !is.good() ) {
+                cerr << "Can't read from file " << fname << endl;
+                return -1;
+        }
+        is.ignore( numeric_limits<streamsize>::max(), '\n');
+
+        if ( fname.rfind( ".spikes") == fname.size() - 7 )
+                fname.erase( fname.size() - 7, fname.size());
+        fname += ".sdf";
+
+        ofstream os( fname.c_str());
+        if ( !os.good() ) {
+                cerr << "Can't open " << fname << " for writing\n";
+                return -1;
+        }
+        os << "#<t>\t<sdf>\t<nspikes>\n";
+
+
+        vector<double> _spike_history;
+        while ( true ) {
+                double datum;
+                is >> datum;
+                if ( is.eof() )
+                        break;
+                _spike_history.push_back( datum);
+        }
+
+        double  at, len = _spike_history.back(), dt,
+                sdf_var = sdf_sigma * sdf_sigma;
+        cout << fname << ": " << _spike_history.size() << " spikes (last at " << _spike_history.back() << ")\n";
+        for ( at = sxf_sample; at < len; at += sxf_sample ) {
+                double result = 0.;
+                unsigned nspikes = 0;
+                for ( auto &T : _spike_history ) {
+                        dt = T - at;
+                        if ( restrict_window > 0 && dt < -restrict_window/2 )
+                                continue;
+                        if ( restrict_window > 0 && dt >  restrict_window/2 )
+                                break;
+
+                        nspikes++;
+                        result += exp( -dt*dt/sdf_var);
+
+                }
+                os << at << "\t" << result << "\t" << nspikes << endl;
+        }
+
+        return 0;
 }
 
-// EOF
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
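
The conversion above evaluates, at each sample time, a sum of unnormalised
Gaussian bumps centred on the spike times, exp(-(T-t)^2/sigma^2) (note the
code divides by sigma^2, not 2*sigma^2), optionally restricted to a window
around t. A standalone sketch of the kernel at a single time point:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    static double sdf_at( const std::vector<double>& spikes, double at, double sigma)
    {
            double result = 0.;
            for ( double T : spikes )
                    result += exp( -(T - at)*(T - at) / (sigma*sigma));
            return result;
    }

    int main()
    {
            std::vector<double> spikes {9., 10., 11.};
            printf( "%g\n", sdf_at( spikes, 10., 1.));  // 1 + 2*exp(-1) ~= 1.736
    }
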
diff --git a/upstream/src/tools/varfold.cc b/upstream/src/tools/varfold.cc
deleted file mode 100644
index 7a2bdf1..0000000
--- a/upstream/src/tools/varfold.cc
+++ /dev/null
@@ -1,718 +0,0 @@
-/*
- * Author: Andrei Zavada <johnhommer at gmail.com>
- *
- * License: GPL-2+
- *
- * Initial version: 2008-11-11
- *
- */
-
-
-
-#include <unistd.h>
-#include <cmath>
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-#include <fstream>
-#include <sstream>
-#include <limits>
-#include <stdexcept>
-#include <vector>
-#include <valarray>
-#include <numeric>
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-using namespace std;
-
-
-typedef vector<double>::iterator vd_i;
-typedef vector<unsigned>::iterator vu_i;
-
-
-enum TConvType {
-	SDFF_CMP_NONE,
-	SDFF_CMP_SQDIFF,
-	SDFF_CMP_WEIGHT
-};
-
-enum TCFOpType {
-	SDFF_CFOP_AVG,
-	SDFF_CFOP_PROD,
-	SDFF_CFOP_SUM
-};
-
-
-struct SOptions {
-	const char
-		*working_dir,
-		*target_profiles_dir,
-		*grand_target_fname,
-		*grand_result_fname;
-	vector<string>
-		units;
-	TCFOpType
-		cf_op_type;
-	vector<unsigned>
-		dims;
-	bool	go_sdf:1,
-		use_shf:1,
-		do_normalise:1,
-		do_matrix_output:1,
-		do_column_output:1,
-		assume_no_shf_value:1,
-		assume_generic_data:1,
-		assume_no_timepoint:1,
-		octave_compat:1,
-		verbosely:1;
-	double	sample_from,
-		sample_period,
-		sample_window;
-	unsigned
-		field_n,
-		of_fields,
-		skipped_first_lines;
-	TConvType
-		conv_type;
-
-	SOptions()
-	      : working_dir ("."),
-		target_profiles_dir ("."),
-		grand_target_fname ("overall.target"),
-		grand_result_fname (nullptr),
-		cf_op_type (SDFF_CFOP_AVG),
-		go_sdf (true),
-		use_shf (false),
-		do_normalise (false),
-		do_matrix_output (true),
-		do_column_output (false),
-		assume_no_shf_value (false),
-		assume_generic_data (true),
-		assume_no_timepoint (false),
-		octave_compat (false),
-		verbosely (true),
-		sample_from (0),
-		sample_period (0),
-		sample_window (0),
-		field_n (1),
-		of_fields (1),
-		skipped_first_lines (0),
-		conv_type (SDFF_CMP_NONE)
-		{}
-};
-
-static SOptions Options;
-
-//static size_t dim_prod;
-
-static int get_unit_cf( const char *unit_label, valarray<double> &Mi, double *result);
-
-static int parse_cmdline( int argc, char *argv[]);
-static void usage( const char *argv0);
-
-#define SDFCAT_EARGS		-1
-#define SDFCAT_EHELPREQUEST	-2
-#define SDFCAT_EFILES		-3
-#define SDFCAT_ERANGES		-4
-
-
-static int read_matrices_from_sxf( const char* fname, valarray<double> &M, valarray<double> &H, double *sdf_max_p = nullptr);
-static int construct_matrix_from_var( const char* fname, valarray<double> &M);
-static int read_matrix( const char*, valarray<double>&);
-static int write_matrix( const char*, const valarray<double>&);
-static double convolute_matrix_against_target( const valarray<double>&, const valarray<double>&);
-
-
-
-int
-main( int argc, char *argv[])
-{
-	int retval = 0;
-
-	if ( argc == 1 ) {
-		usage( argv[0]);
-		return SDFCAT_EARGS;
-	}
-
-	{
-		int parse_retval = parse_cmdline( argc, argv);
-		if ( parse_retval ) {
-			if ( parse_retval == SDFCAT_EHELPREQUEST )
-				usage( argv[0]);
-			return -1;
-		}
-
-		if ( Options.assume_no_shf_value && Options.use_shf ) {
-			cerr << "Conflicting options (-H and -H-)\n";
-			return -1;
-		}
-	}
-
-      // cd as requested
-	char *pwd = nullptr;
-	if ( Options.working_dir ) {
-		pwd = getcwd( nullptr, 0);
-		if ( chdir( Options.working_dir) ) {
-			fprintf( stderr, "Failed to cd to \"%s\"\n", Options.working_dir);
-			return -2;
-		}
-	}
-
-
-//	vector<double> unit_CFs;
-
-	size_t dim_prod = accumulate( Options.dims.begin(), Options.dims.end(), 1., multiplies<double>());
-	valarray<double>
-		Mi (dim_prod), Mi_valid_cases (dim_prod),
-		G  (dim_prod), G_valid_cases  (dim_prod);
-
-	for ( vector<string>::iterator uI = Options.units.begin(); uI != Options.units.end(); uI++ ) {
-		double CFi;
-		if ( get_unit_cf( uI->c_str(), Mi, &CFi) )  // does its own convolution
-			return -4;
-
-		for ( size_t i = 0; i < dim_prod; i++ )
-			if ( !isfinite( Mi[i]) )
-				Mi[i] = (Options.cf_op_type == SDFF_CFOP_PROD) ? 1. : 0.;
-			else
-				G_valid_cases[i]++;
-
-		switch ( Options.cf_op_type ) {
-		case SDFF_CFOP_SUM:
-		case SDFF_CFOP_AVG:
-			G += Mi;
-		    break;
-		case SDFF_CFOP_PROD:
-			G *= Mi;
-		    break;
-		}
-
-		if ( Options.conv_type != SDFF_CMP_NONE ) {
-			ofstream o( (*uI)+".CF");
-			o << CFi << endl;
-		}
-	}
-
-	// for ( size_t i = 0; i < dim_prod; i++ )
-	// 	if ( G_valid_cases[i] == 0. )
-	// 		G_valid_cases[i] = 1;
-
-	if ( Options.cf_op_type == SDFF_CFOP_AVG )
-		G /= G_valid_cases; // Options.units.size();
-
-	if ( Options.units.size() > 1 || Options.grand_result_fname ) {
-
-		string grand_total_bname (Options.grand_result_fname ? Options.grand_result_fname
-					  : (Options.cf_op_type == SDFF_CFOP_AVG)
-					  ? "AVERAGE"
-					   : (Options.cf_op_type == SDFF_CFOP_SUM)
-					   ? "SUM" : "PRODUCT");
-		write_matrix( grand_total_bname.c_str(), G);
-
-		if ( Options.conv_type != SDFF_CMP_NONE ) {
-			valarray<double> T (dim_prod);
-			if ( read_matrix( (string(Options.target_profiles_dir) + '/' + Options.grand_target_fname).c_str(), T) )
-				return -4;
-			double grandCF = convolute_matrix_against_target( G, T);
-
-			ofstream grand_CF_strm ((grand_total_bname + ".CF").c_str());
-			grand_CF_strm << grandCF << endl;
-		}
-	}
-
-	if ( pwd )
-		if ( chdir( pwd) )
-			;
-
-	return retval;
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-static int
-get_unit_cf( const char* ulabel, valarray<double> &M, double *result_p)
-{
-	valarray<double> H (M.size()), T (M.size());
-
-	string eventual_fname;
-	if ( Options.go_sdf ) {
-		if ( (Options.assume_generic_data = true,
-		      read_matrices_from_sxf( (eventual_fname = ulabel).c_str(), M, H)) &&
-
-		     (Options.assume_generic_data = false,
-		      read_matrices_from_sxf( (eventual_fname = string(ulabel) + ".sxf").c_str(), M, H)) &&
-
-		     (Options.assume_no_shf_value = true, Options.use_shf = false,
-		      read_matrices_from_sxf( (eventual_fname = string(ulabel) + ".sdf").c_str(), M, H)) ) {
-
-			fprintf( stderr, "Failed to read data from\"%s\" or \"%s.s{x,d}f\"\n", ulabel, ulabel);
-			return -2;
-		}
-	} else  // go var
-		if ( construct_matrix_from_var( (eventual_fname = ulabel).c_str(), M) &&
-		     construct_matrix_from_var( (eventual_fname = string(ulabel) + ".var").c_str(), M) ) {
-
-			fprintf( stderr, "Failed to read \"%s.var\"\n", ulabel);
-			return -2;
-		}
-
-	if ( (Options.do_matrix_output || Options.do_column_output)
-	     && Options.dims.size() == 2 ) {  // only applicable to 2-dim matrices
-
-		write_matrix( eventual_fname.c_str(), M);
-		if ( Options.use_shf )
-			write_matrix( (string(ulabel) + "(shf)").c_str(), H);
-	}
-
-	if ( Options.conv_type != SDFF_CMP_NONE ) {
-		if ( read_matrix( (string(Options.target_profiles_dir) + '/' + eventual_fname + ".target").c_str(), T) ) {
-			if ( !Options.do_matrix_output && !Options.do_column_output ) {
-				fprintf( stderr, "Failed to read target profile for \"%s\", and no matrix folding output specified\n",
-					 eventual_fname.c_str());
-				return -2;
-			}
-		} else
-			if ( result_p )
-				*result_p = convolute_matrix_against_target( M, T);
-	}
-
-	return 0;
-}
-
-
-
-int
-read_datum( ifstream &ifs, double& v) throw (invalid_argument)
-{
-	static string _s;
-	ifs >> _s;
-	if ( !ifs.good() )
-		return -1;
-	double _v = NAN;
-	try { _v = stod( _s); }
-	catch ( invalid_argument ex) {
-		if ( strcasecmp( _s.c_str(), "NaN") == 0 )
-			v = NAN;
-		else if ( strcasecmp( _s.c_str(), "inf") == 0 || strcasecmp( _s.c_str(), "infinity") == 0 )
-			v = INFINITY;
-		else {
-			throw (ex);  // rethrow
-			return -2;
-		}
-	}
-	v = _v;
-	return 0;
-}
-
-
-
-// ------------------------- matrix io ------
-
-static int
-read_matrices_from_sxf( const char *fname, valarray<double> &M, valarray<double> &H, double *sdf_max_p)
-{
-	if ( Options.verbosely )
-		printf( "Trying \"%s\" ... ", fname);
-
-	ifstream ins( fname);
-	if ( !ins.good() ) {
-		if ( Options.verbosely )
-			printf( "not found\n");
-		return -1;
-	} else
-		if ( Options.verbosely )
-			printf( "found\n");
-
-//	size_t	ignored_lines = 0;
-
-	double	sdf_max = -INFINITY,
-		_;
-	size_t	idx, row;
-	// the first skipped_first_lines records are read but not stored:
-	// idx only starts advancing once row exceeds that count
-	for ( idx = row = 0; idx < M.size(); idx += (++row > Options.skipped_first_lines) ) {
-		while ( ins.peek() == '#' ) {
-//			ignored_lines++;
-			ins.ignore( numeric_limits<streamsize>::max(), '\n');
-		}
-
-		if ( ins.eof() ) {
-			fprintf( stderr, "Short read from \"%s\" at element %zu\n", fname, idx);
-			return -2;
-		}
-		if ( !Options.assume_no_timepoint )
-			ins >> _;       // time
-
-		try {
-			read_datum( ins, M[idx]);
-			if ( !Options.assume_generic_data ) {
-				if ( !Options.assume_no_shf_value )
-					read_datum( ins, H[idx]);  // shf
-				read_datum( ins, _);       // nspikes
-			}
-		} catch ( const invalid_argument& ) {
-			fprintf( stderr, "Bad value read from \"%s\" at element %zu\n", fname, idx);
-			return -2;
-		}
-
-		if ( M[idx] > sdf_max )
-			sdf_max = M[idx];
-	}
-
-	if ( Options.use_shf )
-		M *= H;
-
-	if ( Options.do_normalise ) {
-		M /= sdf_max;
-		//H /= sdf_max;
-	}
-
-	if ( sdf_max_p )
-		*sdf_max_p = sdf_max;
-
-	return 0;
-}
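-
-// Record layout consumed above, one record per matrix element (after any
-// leading `#' comment lines):
-//
-//	<time> <sdf> <shf> <nspikes>	full .sxf record
-//	<time> <sdf> <nspikes>		with assume_no_shf_value
-//	<time> <value>			with assume_generic_data
-//	<value>				additionally with assume_no_timepoint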
-
-
-
-
-
-static int
-construct_matrix_from_var( const char *fname, valarray<double> &M)
-{
-	ifstream ins( fname);
-	if ( !ins.good() ) {
-//		cerr << "No results in " << fname << endl;
-		return -1;
-	}
-
-	double	at, _, var;
-	vector<double> sample;
-	size_t	idx;
-
-	string line;
-	try {
-		for ( idx = 0; idx < M.size(); ++idx ) {
-			M[idx] = 0.;
-
-			while ( ins.peek() == '#' )
-				ins.ignore( numeric_limits<streamsize>::max(), '\n');
-
-			sample.clear();
-			do {
-				getline( ins, line, '\n');
-				if ( ins.eof() ) {
-					if ( idx == M.size()-1 )
-						break;
-					else
-						throw "bork";
-				}
-				stringstream fields (line);
-				fields >> at;
-				for ( size_t f = 1; f <= Options.of_fields; ++f )
-					if ( f == Options.field_n )
-						fields >> var;
-					else
-						fields >> _;
-
-				if ( at < Options.sample_from + Options.sample_period * idx - Options.sample_window/2 )
-					continue;
-
-				sample.push_back( var);
-
-			} while ( at <= Options.sample_from + Options.sample_period * idx + Options.sample_window/2 );
-
-			M[idx] = sample.empty()
-				? NAN
-				: accumulate( sample.begin(), sample.end(), 0.) / sample.size();
-		}
-	} catch (...) {
-		fprintf( stderr, "Short read, bad data or some other IO error in %s at record %zu\n", fname, idx);
-		return -2;
-	}
-
-	// if ( Options.do_normalise ) {
-	// 	for ( idx = 0; idx < dim_prod; idx++ )
-	// 		M[idx] /= sdf_max;
-	// 	// if ( H )
-	// 	// 	for ( idx = 0; idx < dim_prod; idx++ )
-	// 	// 		H[idx] /= sdf_max;
-	// }
-
-	return 0;
-}
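-
-// Sampling scheme used above: element idx of M is the mean of field
-// Options.field_n over the records whose timestamps fall in a window of
-// width sample_window centred at
-//
-//	sample_from + sample_period * idx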
-
-
-
-
-
-static int
-read_matrix( const char *fname, valarray<double> &M)
-{
-	ifstream ins( fname);
-	if ( !ins.good() ) {
-		cerr << "No results in " << fname << endl;
-		return -1;
-	}
-
-	while ( ins.peek() == '#' ) {
-		ins.ignore( numeric_limits<streamsize>::max(), '\n');  // skip header
-	}
-
-	size_t	idx;
-	for ( idx = 0; idx < M.size(); idx++ )
-		if ( !(ins >> M[idx]) ) {  // catches both eof and unparseable input
-			fprintf( stderr, "Short read from \"%s\" at element %zu\n", fname, idx);
-			return -1;
-		}
-	return 0;
-}
-
-
-
-
-
-
-static int
-write_matrix( const char *fname, const valarray<double> &X)
-{
-	if ( Options.do_matrix_output ) {
-		ofstream outs( (string(fname) + ".mx").c_str());
-		if ( Options.verbosely )
-			printf( "Writing \"%s.mx\"\n", fname);
-		for ( size_t k = 0; k < Options.dims[0]; k++ )
-			for ( size_t l = 0; l < Options.dims[1]; l++ ) {
-				if ( l > 0 )  outs << "\t";
-				const double &datum = X[k*Options.dims[1] + l];  // row-major: row stride is the column count, dims[1]
-				if ( Options.octave_compat && !std::isfinite(datum) )
-					outs << (std::isinf(datum) ? "Inf" : "NaN");
-				else
-					outs << datum;
-				if ( l == Options.dims[1]-1 ) outs << endl;
-			}
-		if ( !outs.good() )
-			return -1;
-	}
-
-	if ( Options.do_column_output ) {
-		ofstream outs( (string(fname) + ".col").c_str());
-		if ( Options.verbosely )
-			printf( "Writing \"%s.col\"\n", fname);
-		for ( size_t k = 0; k < Options.dims[0]; k++ )
-			for ( size_t l = 0; l < Options.dims[1]; l++ )
-				outs << l << "\t" << k << "\t" << X[k*Options.dims[1] + l] << endl;
-		if ( !outs.good() )
-			return -1;
-	}
-
-	return 0;
-}
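-
-// Shapes of the two outputs above, for a 2x3 matrix (dims[0]=2 rows,
-// dims[1]=3 columns) holding a b c / d e f:
-//
-//	foo.mx:   a<TAB>b<TAB>c
-//	          d<TAB>e<TAB>f
-//
-//	foo.col:  one "<col> <row> <value>" triple per line,
-//	          i.e. 0 0 a, 1 0 b, 2 0 c, 0 1 d, ...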
-
-
-
-
-
-
-static double
-convolute_matrix_against_target( const valarray<double> &M, const valarray<double> &T)
-{
-	double	CF = 0.;
-	size_t idx;
-
-	switch ( Options.conv_type ) {
-	case SDFF_CMP_WEIGHT:
-		for ( idx = 0; idx < M.size(); idx++ )
-			CF += M[idx] * T[idx];
-		break;
-	case SDFF_CMP_SQDIFF:
-		for ( idx = 0; idx < M.size(); idx++ )
-			CF += pow( M[idx] - T[idx], 2);
-		CF = sqrt( CF);
-		break;
-	case SDFF_CMP_NONE:
-		return NAN;
-	}
-
-	return CF;
-}
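-
-// The two comparison modes above, in formula form:
-//
-//	SDFF_CMP_WEIGHT:  CF = sum_i M[i] * T[i]              (dot product)
-//	SDFF_CMP_SQDIFF:  CF = sqrt( sum_i (M[i] - T[i])^2 )  (Euclidean distance)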
-
-
-
-
-
-
-
-
-
-
-static int
-parse_cmdline( int argc, char *argv[])
-{
-	int c;
-	while ( (c = getopt( argc, argv, "OC:Rd:f:G:H::t:Nu:x:T:U:V:z:o:F:qh")) != -1 ) {
-		switch ( c ) {
-		case 'C':	Options.working_dir = optarg;				break;
-
-		case 'R':	Options.go_sdf = false;					break;
-
-		case 'T':	Options.grand_target_fname = optarg;			break;
-		case 'U':	Options.grand_result_fname = optarg;			break;
-
-		case 'd':	if ( sscanf( optarg, "%lg:%lg:%lg",
-					     &Options.sample_from, &Options.sample_period, &Options.sample_window) < 2 ) {
-					cerr << "Expecting two or three parameters with -d (from:period[:window])\n";
-					return SDFCAT_EARGS;
-				}
-				if ( Options.sample_window == 0. )
-					Options.sample_window = Options.sample_period;	break;
-
-		case 'f':	if ( sscanf( optarg, "%d:%d",
-					     &Options.field_n, &Options.of_fields) < 1 ) {
-					cerr << "Expecting field[:of_fields] with -f\n";
-					return SDFCAT_EARGS;
-				}							break;
-
-		case 'G':	Options.target_profiles_dir = optarg;			break;
-
-		case 'u':	Options.units.push_back( string(optarg));		break;
-
-		case 'H':	if ( optarg )
-					if ( strcmp( optarg, "-") == 0 )
-						Options.assume_no_shf_value = true, Options.use_shf = false;
-					else {
-						cerr << "Unrecognised option to -H: `" << optarg << "'\n";
-						return SDFCAT_EARGS;
-					}
-				else
-					Options.use_shf = true;				break;
-
-		case 't':	if ( optarg ) {
-					if ( strcmp( optarg, "-") == 0 )
-						Options.assume_no_timepoint = Options.assume_generic_data = true,
-							Options.use_shf = false;
-					else {
-						cerr << "Option -t can only be -t-\n";
-						return SDFCAT_EARGS;
-					}
-				}							break;
-
-		case 'N':	Options.do_normalise = true;				break;
-
-		case 'V':	if ( strcmp( optarg, "sqdiff" ) == 0 )
-					Options.conv_type = SDFF_CMP_SQDIFF;
-				else if ( strcmp( optarg, "weight") == 0 )
-					Options.conv_type = SDFF_CMP_WEIGHT;
-				else {
-					cerr << "-V takes `sqdiff' or `weight'\n";
-					return SDFCAT_EARGS;
-				}
-			break;
-		case 'z':	if ( strcmp( optarg, "sum" ) == 0 )
-					Options.cf_op_type = SDFF_CFOP_SUM;
-				else if ( strcmp( optarg, "avg") == 0 )
-					Options.cf_op_type = SDFF_CFOP_AVG;
-				else if ( strcmp( optarg, "prod") == 0 )
-					Options.cf_op_type = SDFF_CFOP_PROD;
-				else {
-					cerr << "-z takes `sum', `avg' or `prod'\n";
-					return SDFCAT_EARGS;
-				}
-			break;
-		case 'o':	Options.do_matrix_output = (strchr( optarg, 'm') != nullptr);
-				Options.do_column_output = (strchr( optarg, 'c') != nullptr);
-			break;
-
-		case 'x':
-		{
-			unsigned d;
-			if ( sscanf( optarg, "%u", &d) < 1 ) {
-				cerr << "-x takes an unsigned\n";
-				return SDFCAT_EARGS;
-			}
-			Options.dims.push_back( d);
-		}   break;
-
-		case 'F':
-			if ( sscanf( optarg, "%u", &Options.skipped_first_lines) < 1 ) {
-				cerr << "-F takes an unsigned\n";
-				return SDFCAT_EARGS;
-			}
-		    break;
-
-		case 'O':	Options.octave_compat = true;				break;
-
-		case 'q':	Options.verbosely = false;				break;
-
-		case 'h':
-			return SDFCAT_EHELPREQUEST;
-		default:
-			return SDFCAT_EARGS;
-		}
-	}
-
-	for ( int i = optind; i < argc; i++ )
-		Options.units.push_back( string(argv[i]));
-
-	if ( Options.units.empty() ) {
-		cerr << "No units (-u) specified\n";
-		return SDFCAT_EARGS;
-	}
-	if ( Options.dims.empty() ) {
-		cerr << "No dimensions (-x) specified\n";
-		return SDFCAT_EARGS;
-	}
-
-	return 0;
-}
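-
-// A hypothetical invocation (the binary name is a guess inferred from the
-// SDFCAT_* constants; the option values are examples only): fold PN.0 and
-// PN.1 as 10x10 matrices, weight each against its target profile under
-// ./targets, write per-unit .mx files, and average the two CFs:
-//
-//	sdfcat -G targets -x 10 -x 10 -V weight -z avg -om PN.0 PN.1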
-
-
-
-
-static void
-usage( const char *argv0)
-{
-	cout << "Usage: " << argv0 << " [options] [unitname_or_filename] ...\n"
-		"Options are\n"
-		" -C <dir>\t\tcd into dir before working\n"
-		" -G <dir>\t\tSearch for target profiles in dir (default " << Options.target_profiles_dir << ")\n"
-		" -x <dim>\t\tDimensions for the target and data matrices (repeat as necessary)\n"
-		" -V[sqdiff|weight]\tObtain the resulting CF by this convolution method:\n"
-		"\t\t\t  root of summed squared differences between source and target profiles,\n"
-		"\t\t\t  or sum of source profile values weighted by those in the target profile\n"
-		" -z[sum|avg|prod]\tOperation applied to individual CFs, to produce a grand total\n"
-		" -T <fname>\tRead reference profile from this file (default \"" << Options.grand_target_fname << "\")\n"
-		" -U <fname>\tWrite the total result to this file (default is {SUM,AVERAGE,PRODUCT}.mx, per option -z)\n"
-		"\n"
-		" -R\t\t\tCollect .var data rather than .sxf\n"
-		"With -R, use\n"
-		" -f <unsigned n1>:<unsigned n2>\n"
-		"\t\t\tExtract n1th field of n2 consec. fields per record\n"
-		"\t\t\t  (default " << Options.field_n << " of " << Options.of_fields << ")\n"
-		" -d <double f>:<double p>:<double ws>\tSample from time f at period p with window size ws\n"
-		"otherwise:\n"
-		" -F <unsigned>\t\tSkip that many leading records of the sxf file\n"
-		" -H \t\t\tMultiply sdf by shf\n"
-		" -H-\t\t\tAssume there is no shf field in .sxf file\n"
-		" -t-\t\t\tAssume no timestamp in data file; implies -H-\n"
-		"\n"
-		" -o[mc]\t\t\tWrite <unit>.mx ([m]atrix) and/or <unit>.col ([c]olumn) profiles\n"
-		" -O\t\t\tWrite nan and inf as \"NaN\" and \"Inf\" to please octave\n"
-		" -q\t\t\tSuppress normal messages\n"
-		" -h\t\t\tDisplay this help\n"
-		"\n"
-		" unitname_or_filename\tData vector (e.g., PN.0; multiple entries as necessary;\n"
-		"\t\t\t  will try the label verbatim, then label.sxf, then label.sdf)\n";
-}
-
-// EOF

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cnrun.git


