big reshuffle in libcnrun
author Andrei Zavada <johnhommer@gmail.com>
Tue, 2 Dec 2014 00:47:01 +0000 (02:47 +0200)
committer Andrei Zavada <johnhommer@gmail.com>
Tue, 2 Dec 2014 00:47:01 +0000 (02:47 +0200)
63 files changed:
upstream/configure.ac
upstream/src/libcnrun/Makefile.am
upstream/src/libcnrun/base-neuron.hh [deleted file]
upstream/src/libcnrun/base-synapse.hh [deleted file]
upstream/src/libcnrun/base-unit.cc [deleted file]
upstream/src/libcnrun/base-unit.hh [deleted file]
upstream/src/libcnrun/forward-decls.hh [deleted file]
upstream/src/libcnrun/hosted-attr.hh [deleted file]
upstream/src/libcnrun/hosted-neurons.cc [deleted file]
upstream/src/libcnrun/hosted-neurons.hh [deleted file]
upstream/src/libcnrun/hosted-synapses.cc [deleted file]
upstream/src/libcnrun/hosted-synapses.hh [deleted file]
upstream/src/libcnrun/integrate-base.hh [deleted file]
upstream/src/libcnrun/integrate-rk65.hh [deleted file]
upstream/src/libcnrun/integrator/base.hh [new file with mode: 0644]
upstream/src/libcnrun/integrator/rk65.hh [new file with mode: 0644]
upstream/src/libcnrun/model-cycle.cc [deleted file]
upstream/src/libcnrun/model-nmlio.cc [deleted file]
upstream/src/libcnrun/model-struct.cc [deleted file]
upstream/src/libcnrun/model-tags.cc [deleted file]
upstream/src/libcnrun/model.hh [deleted file]
upstream/src/libcnrun/model/.dirstamp [new file with mode: 0644]
upstream/src/libcnrun/model/cycle.cc [new file with mode: 0644]
upstream/src/libcnrun/model/forward-decls.hh [new file with mode: 0644]
upstream/src/libcnrun/model/model.hh [new file with mode: 0644]
upstream/src/libcnrun/model/nmlio.cc [new file with mode: 0644]
upstream/src/libcnrun/model/sources.cc [new file with mode: 0644]
upstream/src/libcnrun/model/sources.hh [new file with mode: 0644]
upstream/src/libcnrun/model/struct.cc [new file with mode: 0644]
upstream/src/libcnrun/model/tags.cc [new file with mode: 0644]
upstream/src/libcnrun/mx-attr.hh [deleted file]
upstream/src/libcnrun/sources.cc [deleted file]
upstream/src/libcnrun/sources.hh [deleted file]
upstream/src/libcnrun/standalone-attr.hh [deleted file]
upstream/src/libcnrun/standalone-neurons.cc [deleted file]
upstream/src/libcnrun/standalone-neurons.hh [deleted file]
upstream/src/libcnrun/standalone-synapses.cc [deleted file]
upstream/src/libcnrun/standalone-synapses.hh [deleted file]
upstream/src/libcnrun/types.cc [deleted file]
upstream/src/libcnrun/types.hh [deleted file]
upstream/src/libcnrun/units/.dirstamp [new file with mode: 0644]
upstream/src/libcnrun/units/base-neuron.hh [new file with mode: 0644]
upstream/src/libcnrun/units/base-synapse.hh [new file with mode: 0644]
upstream/src/libcnrun/units/base-unit.cc [new file with mode: 0644]
upstream/src/libcnrun/units/base-unit.hh [new file with mode: 0644]
upstream/src/libcnrun/units/forward-decls.hh [new file with mode: 0644]
upstream/src/libcnrun/units/hosted-attr.hh [new file with mode: 0644]
upstream/src/libcnrun/units/hosted-neurons.cc [new file with mode: 0644]
upstream/src/libcnrun/units/hosted-neurons.hh [new file with mode: 0644]
upstream/src/libcnrun/units/hosted-synapses.cc [new file with mode: 0644]
upstream/src/libcnrun/units/hosted-synapses.hh [new file with mode: 0644]
upstream/src/libcnrun/units/mx-attr.hh [new file with mode: 0644]
upstream/src/libcnrun/units/standalone-attr.hh [new file with mode: 0644]
upstream/src/libcnrun/units/standalone-neurons.cc [new file with mode: 0644]
upstream/src/libcnrun/units/standalone-neurons.hh [new file with mode: 0644]
upstream/src/libcnrun/units/standalone-synapses.cc [new file with mode: 0644]
upstream/src/libcnrun/units/standalone-synapses.hh [new file with mode: 0644]
upstream/src/libcnrun/units/types.cc [new file with mode: 0644]
upstream/src/libcnrun/units/types.hh [new file with mode: 0644]
upstream/src/lua-cnrun/Makefile.am
upstream/src/lua-cnrun/cnhost.hh
upstream/src/lua-cnrun/commands.cc
upstream/src/tools/Makefile.am

index 77b9ecffde61b7fd6b9fafbb9e5220f362926172..c4aa450063eed1b3796958131dd6dd3c3a1a3cbd 100644 (file)
@@ -1,7 +1,7 @@
 AC_COPYRIGHT([Copyright (c) 2008-14 Andrei Zavada <johnhommer@gmail.com>])
 
 AC_INIT([cnrun], [2.1.0_rc], [johnhommer@gmail.com])
-AC_CONFIG_SRCDIR([src/libcnrun/model.hh])
+AC_CONFIG_SRCDIR([src/libcnrun/model/model.hh])
 AC_CONFIG_MACRO_DIR([m4])
 AC_PREREQ(2.61)
 
index 2e25618879585c22aaecf5825eb986faf96e5a41..4d064201f90ffdd50406ed2b86c0af972ccde994 100644 (file)
@@ -1,30 +1,38 @@
 include $(top_srcdir)/src/Common.mk
-AM_CXXFLAGS += -shared -fPIC
+AM_CXXFLAGS += -I. -shared -fPIC
 
 lib_LTLIBRARIES = \
        libcnrun.la
 
 libcnrun_la_SOURCES = \
-       forward-decls.hh \
-       sources.cc \
-       types.cc \
-       base-unit.cc \
-       standalone-neurons.cc \
-       standalone-synapses.cc \
-       hosted-neurons.cc \
-       hosted-synapses.cc \
-       model-struct.cc \
-       model-tags.cc \
-       model-cycle.cc \
-       model-nmlio.cc \
+       integrator/base.hh \
+       integrator/rk65.hh \
+       model/cycle.cc \
+       model/forward-decls.hh \
+       model/model.hh \
+       model/nmlio.cc \
+       model/sources.cc \
+       model/struct.cc \
+       model/tags.cc \
        sources.hh \
-       types.hh \
-       mx-attr.hh \
-       base-unit.hh    standalone-attr.hh      hosted-attr.hh \
-       base-synapse.hh standalone-neurons.hh   hosted-neurons.hh  \
-       base-neuron.hh  standalone-synapses.hh  hosted-synapses.hh \
-       model.hh \
-       integrate-base.hh integrate-rk65.hh
+       units/base-neuron.hh \
+       units/base-synapse.hh \
+       units/base-unit.cc \
+       units/base-unit.hh \
+       units/forward-decls.hh \
+       units/hosted-attr.hh \
+       units/hosted-neurons.cc \
+       units/hosted-neurons.hh  \
+       units/hosted-synapses.cc \
+       units/hosted-synapses.hh \
+       units/mx-attr.hh \
+       units/standalone-attr.hh \
+       units/standalone-neurons.cc \
+       units/standalone-neurons.hh \
+       units/standalone-synapses.cc \
+       units/standalone-synapses.hh \
+       units/types.cc \
+       units/types.hh
 
 libcnrun_la_LIBADD = \
        ../libstilton/liba.a \
@@ -33,15 +41,32 @@ libcnrun_la_LIBADD = \
 libcnrun_la_LDFLAGS = \
        -shared -version-info 2:0:0
 
-libcnrunincdir = $(includedir)/libcnrun
+libcnrun_units_incdir = $(includedir)/libcnrun/units
+
+libcnrun_units_inc_HEADERS = \
+       units/forward-decls.hh \
+       units/base-neuron.hh \
+       units/base-synapse.hh \
+       units/base-unit.hh \
+       units/hosted-attr.hh \
+       units/hosted-neurons.hh  \
+       units/hosted-synapses.hh \
+       units/mx-attr.hh \
+       units/standalone-attr.hh \
+       units/standalone-neurons.hh \
+       units/standalone-synapses.hh \
+       units/types.hh
+
+libcnrun_model_incdir = $(includedir)/libcnrun/model
+
+libcnrun_model_inc_HEADERS = \
+       model/forward-decls.hh \
+       model/sources.hh \
+       model/model.hh
+
+libcnrun_integrator_incdir = $(includedir)/libcnrun/integrator
+
+libcnrun_integrator_inc_HEADERS = \
+       integrator/base.hh \
+       integrator/rk65.hh
 
-libcnruninc_HEADERS = \
-       forward-decls.hh \
-       sources.hh \
-       types.hh \
-       mx-attr.hh \
-       base-unit.hh    standalone-attr.hh      hosted-attr.hh \
-       base-synapse.hh standalone-neurons.hh   hosted-neurons.hh  \
-       base-neuron.hh  standalone-synapses.hh  hosted-synapses.hh \
-       model.hh \
-       integrate-base.hh integrate-rk65.hh
diff --git a/upstream/src/libcnrun/base-neuron.hh b/upstream/src/libcnrun/base-neuron.hh
deleted file mode 100644 (file)
index 603b692..0000000
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- *       File name:  libcn/base-neuron.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2009-03-31
- *
- *         Purpose:  neuron base class
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_BASENEURON_H_
-#define CNRUN_LIBCN_BASENEURON_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <list>
-#include <cstring>
-#include <cmath>
-#include <map>
-#include <tuple>
-
-#include "forward-decls.hh"
-#include "base-unit.hh"
-#include "base-synapse.hh"
-
-
-using namespace std;
-
-namespace cnrun {
-
-struct SSpikeloggerService;
-
-using SCleft = map<C_BaseSynapse*, double>;
-inline double operator+ ( double a, const pair<C_BaseSynapse*, double>& b) { return a + b.second; }
-
-class C_BaseNeuron
-  : public C_BaseUnit {
-
-        DELETE_DEFAULT_METHODS (C_BaseNeuron)
-
-        friend class CModel;
-        friend class C_BaseSynapse;
-
-    protected:
-        C_BaseNeuron (TUnitType intype, const string& inlabel,
-                      double inx, double iny, double inz,
-                      CModel* inM, int s_mask = 0)
-              : C_BaseUnit (intype, inlabel, inM, s_mask),
-                pos (inx, iny, inz),
-                _spikelogger_agent (nullptr)
-                {}
-
-        virtual ~C_BaseNeuron();
-
-        struct SCoord {
-
-                DELETE_DEFAULT_METHODS (SCoord)
-
-                double x, y, z;
-
-                SCoord( double inx, double iny, double inz)
-                      : x (inx), y (iny), z (inz)
-                        {}
-
-                SCoord& operator= ( tuple<double, double, double> v)
-                        {
-                                tie(x, y, z) = v;
-                                return *this;
-                        }
-
-              // distance
-                double operator- ( const SCoord &p) const
-                        {
-                                return sqrt( pow(x - p.x, 2) + pow(y - p.y, 2) + pow(z - p.z, 2));
-                        }
-                bool too_close( const SCoord& p, double mindist = .42 /* units? */) const
-                        {
-                                return operator-(p) < mindist;
-                        }
-        };
-
-    public:
-        SCoord  pos;
-
-        size_t axonal_conns() const     { return _axonal_harbour.size(); }
-        size_t dendrites() const        { return _dendrites.size(); }
-
-        bool
-        connects_to( const C_BaseNeuron &to) const __attribute__ ((pure));
-
-        C_BaseSynapse*
-        connects_via( const C_BaseNeuron &to,
-                      SCleft::mapped_type *g_ptr = nullptr) const;
-
-        void reset_state();
-
-      // even though for rate-based neurons, E is not meaningful
-      // leave these here to make the method available to synapses wanting _target-E
-        virtual double E() const
-                {  return 0;  }
-        virtual double E( vector<double>&) const
-                {  return 0;  }
-      // likewise, for those needing _source->F
-        virtual double F() const
-                {  return 0;  }
-        virtual double F( vector<double>&) const
-                {  return 0;  }
-
-        // struct __SCleft_second_plus {
-        //         double operator() ( double a, const SCleft::value_type &i) { return a + i.second; }
-        // };
-        double Isyn() const  // is the sum of Isyn() on all dendrites
-                {
-                        double I = 0.;
-                        for ( auto &Y : _dendrites )
-                                I += Y.first->Isyn(*this, Y.second);
-                        return I;
-                }
-
-        double Isyn( vector<double> &x) const  // an honourable mention
-                {
-                        double I = 0.;
-                        for ( auto &Y : _dendrites )
-                                I += Y.first->Isyn(x, *this, Y.second);
-                        return I;
-                }
-
-        virtual void possibly_fire()
-                {}
-
-      // Even though rate-based neurons do not track individual spikes,
-      // we can estimate a probability of such a neuron spiking as F*dt*rand().
-      // Which makes this method a valid one
-
-      // Note this assumes P[0] is F for all rate-based neurons, and E
-      // for those conductance-based, which by now is hard-coded for all neurons.
-        virtual size_t n_spikes_in_last_dt() const
-                {  return 0;  }
-        virtual void do_detect_spike_or_whatever()
-                {}
-
-        SSpikeloggerService* spikelogger_agent()  { return _spikelogger_agent;  }
-        SSpikeloggerService*
-        enable_spikelogging_service( int s_mask = 0);
-        SSpikeloggerService*
-        enable_spikelogging_service( double sample_period, double sigma, double from = 0.,
-                                     int s_mask = 0);
-        void disable_spikelogging_service();
-        void sync_spikelogging_history() const;
-
-        double distance_to( C_BaseNeuron*) const; // will do on demand
-
-        void dump( bool with_params = false, FILE *strm = stdout) const;
-
-    protected:
-        SCleft  _dendrites;
-        list<C_BaseSynapse*>
-                _axonal_harbour;
-
-        SSpikeloggerService
-               *_spikelogger_agent;
-};
-
-
-
-
-
-#define CN_KL_COMPUTESDF        (1 << 0)
-#define CN_KL_ISSPIKINGNOW      (1 << 1)
-#define CN_KL_PERSIST           (1 << 2)  // should not be deleted at disable_spikelogging_service
-#define CN_KL_IDLE              (1 << 3)  // should not be placed on spikelogging_neurons on enable_spikelogging_service
-
-
-struct SSpikeloggerService {
-
-        DELETE_DEFAULT_METHODS (SSpikeloggerService)
-
-        friend class C_BaseNeuron;
-        friend class C_HostedConductanceBasedNeuron;  // accesses _status from do_spikelogging_or_whatever
-        friend class COscillatorDotPoisson;  // same
-        friend class COscillatorPoisson;  // same
-        friend class CModel;  // checks CN_KL_IDLE in include_unit
-
-    public:
-        SSpikeloggerService (C_BaseNeuron *client,
-                             int s_mask = 0)
-              : _client (client),
-                t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
-                sample_period (42), sigma (42), start_delay (0.),
-                _status (s_mask & ~CN_KL_COMPUTESDF)
-                {}
-        SSpikeloggerService (C_BaseNeuron *client,
-                             double insample_period, double insigma, double instart_delay = 0.,
-                             int s_mask = 0)
-              : _client (client),
-                t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
-                sample_period (insample_period), sigma (insigma), start_delay (instart_delay),
-                _status (s_mask | CN_KL_COMPUTESDF)
-                {}
-
-        C_BaseNeuron *_client;
-
-        double  t_last_spike_start,
-                t_last_spike_end;
-
-        double  sample_period,
-                sigma,
-                start_delay;
-
-//        void spike_detect();  // multiplexing units will have a different version
-        // replaced by do_spikelogging_or_whatever on the client side
-
-        vector<double> spike_history;
-
-        void reset()
-                {
-                        _status &= ~CN_KL_ISSPIKINGNOW;
-                        t_last_spike_start = t_last_spike_end
-                                /*= t_firing_started = t_firing_ended */ = -INFINITY;
-                        spike_history.clear();
-                }
-
-        size_t n_spikes_since( double since = 0.) const  __attribute__ ((pure));
-
-      // spike density function
-        double sdf( double at, double sample_length, double sigma,
-                    size_t* nspikes = nullptr) const;
-      // spike homogeneity function
-        double shf( double at, double sample_length) const;
-
-      // why not allow custom sampling?
-        size_t get_sxf_vector_custom( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
-                               double sample_period_custom, double sigma_custom,
-                               double from = 0., double to = 0.) const; // "to == 0." for model_time()
-        size_t get_sxf_vector( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
-                               double from = 0., double to = 0.) const
-                {
-                        return get_sxf_vector_custom( sdf_buf, shf_buf, nsp_buf,
-                                                      sample_period, sigma,
-                                                      from, to);
-                }
-
-    protected:
-        void sync_history() const;
-
-    private:
-        int _status;
-};
-
-
-
-
-inline void
-C_BaseNeuron::reset_state()
-{
-        C_BaseUnit::reset_state();
-        if ( _spikelogger_agent )
-                _spikelogger_agent->reset();
-}
-
-
-
-inline void
-C_BaseNeuron::sync_spikelogging_history() const
-{
-        if ( _spikelogger_agent )
-                _spikelogger_agent->sync_history();
-}
-
-
-
-inline double
-C_BaseSynapse::g_on_target( C_BaseNeuron &neuron) const
-{
-        return neuron._dendrites.at(
-                const_cast<C_BaseSynapse*>(this));
-}
-inline void
-C_BaseSynapse::set_g_on_target( C_BaseNeuron &neuron, double g)
-{
-        neuron._dendrites[this] = g;
-}
-
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/base-synapse.hh b/upstream/src/libcnrun/base-synapse.hh
deleted file mode 100644 (file)
index 864148e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- *       File name:  libcn/base-synapse.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2009-03-31
- *
- *         Purpose:  synapse base class
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_BASESYNAPSE_H_
-#define CNRUN_LIBCN_BASESYNAPSE_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <cmath>
-#include <vector>
-#include <list>
-#include <map>
-
-#include "libstilton/lang.hh"
-#include "libstilton/containers.hh"
-#include "forward-decls.hh"
-#include "base-unit.hh"
-
-
-using namespace std;
-
-namespace cnrun {
-
-class C_BaseSynapse
-  : public C_BaseUnit {
-
-        DELETE_DEFAULT_METHODS (C_BaseSynapse)
-
-        friend class CModel;
-        friend class C_BaseNeuron;
-
-    protected:
-        C_BaseSynapse( TUnitType intype,
-                       C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                       double ing, CModel *inM, int s_mask = 0);
-        virtual ~C_BaseSynapse();
-
-    public:
-        bool has_target( const C_BaseNeuron& tgt) const __attribute__ ((pure))
-                {
-                        return cnrun::alg::member(
-                                const_cast<C_BaseNeuron*>(&tgt), _targets);
-                }
-        C_BaseNeuron* source()  {  return _source;  }
-
-        double g_on_target( C_BaseNeuron&) const;
-        void set_g_on_target( C_BaseNeuron&, double);
-
-        C_BaseSynapse *clone_to_target( C_BaseNeuron *nt, double g);
-        C_BaseSynapse *make_clone_independent( C_BaseNeuron *target);
-
-        void reset_state()
-                {
-                        C_BaseUnit::reset_state();
-                        t_last_release_started = -INFINITY;
-                }
-
-        virtual double Isyn( const C_BaseNeuron &with_neuron, double g) const = 0;
-        virtual double Isyn( vector<double> &base, const C_BaseNeuron &with_neuron, double g) const = 0;
-        // no gsyn known to the synapse: now C_BaseNeuron::SCleft knows it
-
-        void dump( bool with_params = false, FILE *strm = stdout) const;
-
-    protected:
-        C_BaseNeuron
-               *_source;
-        list<C_BaseNeuron*>
-                _targets;
-
-        double t_last_release_started;
-
-    private:
-        virtual void update_queue()
-                {}
-};
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/base-unit.cc b/upstream/src/libcnrun/base-unit.cc
deleted file mode 100644 (file)
index 9984dfa..0000000
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
- *       File name:  libcn/base-unit.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2008-08-02
- *
- *         Purpose:  unit base class
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <iostream>
-#include <limits>
-#include <functional>
-
-#include <gsl/gsl_statistics_double.h>
-
-#include "libstilton/containers.hh"
-#include "base-unit.hh"
-#include "model.hh"
-
-
-using namespace std;
-using namespace cnrun;
-using cnrun::alg::member;
-
-unsigned short cnrun::global::precision = 4;
-int cnrun::global::verbosely = 1;
-
-cnrun::C_BaseUnit::
-C_BaseUnit (TUnitType type_, const string& label_,
-            CModel* M_, int s_mask)
-      : precision (global::precision),
-        _type (type_), _status (0 |/* CN_UENABLED |*/ s_mask),
-        M (M_),
-        _binwrite_handle (-1), _listener_disk (nullptr), _listener_mem (nullptr)
-{
-        memset( _label, 0, max_label_size);
-        if ( label_.size() )
-                strncpy( _label, label_.c_str(), max_label_size);
-        else
-                snprintf( _label, max_label_size-1, "fafa%p", this);
-
-        if ( M_ && M_->unit_by_label( label_) ) {
-                fprintf( stderr, "Model %s already has a unit labelled \"%s\"\n", M_->name.c_str(), label_.c_str());
-                throw "Duplicate unit label";
-        }
-
-        reset_params();
-        // don't have field idx to do reset_vars() safely
-}
-
-
-
-void
-cnrun::C_BaseUnit::
-reset_state()
-{
-        if ( M )
-                M->vp( 3, stderr, "Resetting \"%s\"\n", _label);
-        reset_vars();
-        if ( is_listening() )
-                restart_listening();
-}
-
-
-int
-cnrun::C_BaseUnit::
-param_idx_by_sym( const string& sym) const
-{
-        for ( size_t i = 0; i < p_no(); ++i )
-                if ( sym == __CNUDT[_type].stock_param_syms[i] )
-                        return i;
-        return -1;
-}
-
-int
-cnrun::C_BaseUnit::
-var_idx_by_sym( const string& sym) const
-{
-        for ( size_t i = 0; i < v_no(); ++i )
-                if ( sym == __CNUDT[_type].stock_var_syms[i] )
-                        return i;
-        return -1;
-}
-
-
-
-
-
-
-
-
-void
-cnrun::C_BaseUnit::
-start_listening( int mask)
-{
-        if ( !M ) {
-                fprintf( stderr, "start_listening() called for an unattached unit \"%s\"\n", _label);
-                return;
-        }
-        if ( _listener_disk || _listener_mem || _binwrite_handle != -1 ) { // listening already; check if user wants us to listen differently
-                if ( (_status | (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE)))
-                     != mask ) {
-                        stop_listening();  // this will nullptrify _listener_{mem,disk}, avoiding recursion
-                        start_listening( mask);
-                        M->vp( 4, stderr, "Unit \"%s\" was already listening\n", _label);
-                        return;
-                }
-        }
-
-      // deferred write implies a mem listener
-        if ( mask & CN_ULISTENING_DEFERWRITE && !(mask & CN_ULISTENING_MEM) )
-                mask |= CN_ULISTENING_MEM;
-
-        if ( mask & CN_ULISTENING_MEM )
-                _listener_mem = new vector<double>;
-
-        if ( mask & CN_ULISTENING_DISK ) {
-                if ( M->is_diskless )
-                        M->vp( 1, stderr, "Cannot get Unit \"%s\" to listen to disk in a diskless model\n", _label);
-                else {
-                        _listener_disk = new ofstream( (string(_label)+".var").c_str(), ios_base::trunc);
-                        _listener_disk->precision( precision);
-
-                        *_listener_disk << "# " << _label << " variables\n#<time>";
-                        if ( mask & CN_ULISTENING_1VARONLY )
-                                *_listener_disk << "\t<" << var_sym(0) << ">";
-                        else
-                                for ( size_t v = 0; v < v_no(); ++v )
-                                        *_listener_disk << "\t<" << var_sym(v) << ">";
-                        *_listener_disk << endl;
-                        M->vp( 4, stderr, "Unit \"%s\" now listening\n", _label);
-                }
-        }
-
-        if ( mask & CN_ULISTENING_BINARY )
-                _binwrite_handle = open( (string(_label)+".varx").c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR);
-
-        _status |= (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY |
-                            CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE));
-
-      // inform the model
-        M->register_listener( this);
-}
-
-
-void
-cnrun::C_BaseUnit::
-stop_listening()
-{
-      // do deferred write
-        if ( _status & CN_ULISTENING_DEFERWRITE && _listener_mem ) {
-                if ( _listener_disk ) {
-                        for ( auto mI = _listener_mem->begin(); mI != _listener_mem->end(); ) {
-                                *_listener_disk << *(mI++);
-                                if ( _status & CN_ULISTENING_1VARONLY )
-                                        *_listener_disk << "\t" << *(mI++);
-                                else
-                                        for ( size_t v = 0; v < v_no(); ++v )
-                                                *_listener_disk << "\t" << *(mI++);
-                                *_listener_disk << endl;
-                        }
-                }
-                if ( _binwrite_handle != -1 )
-                        if ( write( _binwrite_handle, _listener_mem->data(),
-                                    sizeof(double) * _listener_mem->size()) < 1 )
-                                M->vp( 0, stderr, "write() failed on \"%s.varx\"\n", _label);
-        }
-
-        if ( _listener_mem ) {
-                delete _listener_mem;
-                _listener_mem = nullptr;
-        }
-
-        if ( _listener_disk ) {
-                _listener_disk->close();
-                delete _listener_disk;
-                _listener_disk = nullptr;
-        }
-
-        if ( _binwrite_handle != -1 ) {
-                close( _binwrite_handle);
-                _binwrite_handle = -1;
-        }
-
-        _status &= ~(CN_ULISTENING_MEM | CN_ULISTENING_DISK | CN_ULISTENING_BINARY);
-
-        if ( M ) {
-                M->unregister_listener( this);
-                M->vp( 4, stderr, "Unit \"%s\" not listening now\n", _label);
-        }
-
-}
-
-
-
-
-void
-cnrun::C_BaseUnit::
-tell()
-{
-        if ( _binwrite_handle != -1 && !(_status & CN_ULISTENING_DEFERWRITE) ) {
-                if ( write( _binwrite_handle, &M->V[0], sizeof(double)) < 1 ||
-                     write( _binwrite_handle, &var_value(0),
-                            sizeof(double) * ((_status & CN_ULISTENING_1VARONLY) ? 1 : v_no())) < 1 )
-                        M->vp( 0, stderr, "write() failed in tell() for \"%s\"\n", _label);
-        }
-
-        if ( _listener_disk && !(_status & CN_ULISTENING_DEFERWRITE) ) {
-                *_listener_disk << model_time();
-                if ( _status & CN_ULISTENING_1VARONLY )
-                        *_listener_disk << "\t" << var_value(0);
-                else
-                        for ( size_t v = 0; v < v_no(); ++v )
-                                *_listener_disk << "\t" << var_value(v);
-                *_listener_disk << endl;
-        }
-
-        if ( _listener_mem ) {
-//                _listener_mem->push_back( 999);
-                _listener_mem->push_back( model_time());
-                if ( _status & CN_ULISTENING_1VARONLY )
-                        _listener_mem->push_back( var_value(0));
-                else
-                        for ( size_t v = 0; v < v_no(); ++v )
-                                _listener_mem->push_back( var_value(v));
-        }
-}
-
-
-
-
-
-
-void
-cnrun::C_BaseUnit::
-dump( bool with_params, FILE *strm) const
-{
-        fprintf( strm, "[%lu] (%s) \"%s\"\n", _serial_id, species(), _label);
-
-        if ( with_params ) {
-                fprintf( strm, "    Pp: ");
-                for ( size_t p = 0; p < p_no(); ++p )
-                        if ( *param_sym(p) != '.' || M->options.verbosely > 5 )
-                                fprintf( strm, "%s = %g; ", param_sym(p), get_param_value(p));
-                fprintf( strm, "\n");
-        }
-        fprintf( strm, "    Vv: ");
-        for ( size_t v = 0; v < v_no(); ++v )
-                if ( *var_sym(v) != '.' || M->options.verbosely > 5 )
-                        fprintf( strm, "%s = %g; ", var_sym(v), get_var_value(v));
-        fprintf( strm, "\n");
-
-        if ( _sources.size() ) {
-                fprintf( strm, "   has sources:  ");
-                for ( auto &S : _sources )
-                        fprintf( strm, "%s << %s;  ",
-                                 (S.sink_type == SINK_PARAM) ? param_sym(S.idx) : var_sym(S.idx),
-                                 S.source->name());
-                fprintf( strm, "\n");
-        }
-
-        if ( is_listening() ) {
-                fprintf( strm, "   listening to %s%s%s\n",
-                         _listener_mem ? "mem" : "",
-                         _listener_mem && _listener_disk ? ", " : "",
-                         _listener_disk ? "disk" : "");
-        }
-}
-
-
-
-
-
-
-// source interface
-
-void
-cnrun::C_BaseUnit::
-detach_source( C_BaseSource *s, TSinkType sink_type, size_t idx)
-{
-        // list <SSourceInterface<C_BaseSource>>::iterator K;
-        // while ( (K = find( _sources.begin(), _sources.end(),
-        //                    )) != _sources.end() )
-        //         _sources.erase( K);
-        _sources.remove( SSourceInterface<C_BaseSource> (s, sink_type, idx));
-        M->unregister_unit_with_sources( this);
-}
-
-
-void
-cnrun::C_BaseUnit::
-apprise_from_sources()
-{
-        for ( auto &S : _sources )
-                switch ( S.sink_type ) {
-                case SINK_PARAM:
-//                        printf( "apprise_from_sources() for %s{%d} = %g\n", _label, S->idx, (*S->source)( model_time()));
-                        param_value( S.idx) = (*S.source)( model_time());
-                        param_changed_hook();
-                    break;
-                case SINK_VAR:
-                        var_value( S.idx) = (*S.source)( model_time());
-                    break;
-                }
-}
-
-
-cnrun::C_BaseUnit::
-~C_BaseUnit()
-{
-        if ( M )
-                M->vp( 5, "   deleting base unit \"%s\"\n", _label);
-
-        if ( is_listening() ) {
-                stop_listening();
-                if ( M && M->model_time() == 0. )
-                      // nothing has been written yet, delete the files on disk
-                        unlink( (string(_label) + ".var").c_str());
-        }
-        if ( M )
-                M->exclude_unit( this, CModel::TExcludeOption::no_delete);
-}
-
-
-
-
-
-
-// ----- C_BaseNeuron
-
-
-bool
-cnrun::C_BaseNeuron::
-connects_to( const C_BaseNeuron &to) const
-{
-        for ( auto &A : _axonal_harbour )
-                if ( A->has_target( to) )
-                        return true;
-        return false;
-}
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseNeuron::
-connects_via( const C_BaseNeuron &to,
-              SCleft::mapped_type *g_ptr) const
-{
-        for ( auto &A : _axonal_harbour )
-                if ( A->has_target( to) ) {
-                        if ( g_ptr )
-                                *g_ptr = to._dendrites.at(A);
-                        return A;
-                }
-        if ( g_ptr )
-                *g_ptr = NAN;
-        return nullptr;
-}
-
-
-void
-cnrun::C_BaseNeuron::
-dump( bool with_params, FILE *strm) const
-{
-        C_BaseUnit::dump( with_params);
-        if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_IDLE) )
-                fprintf( strm, "   logging spikes at %g:%g\n", _spikelogger_agent->sample_period, _spikelogger_agent->sigma);
-        fprintf( strm, "\n");
-
-}
-
-
-cnrun::C_BaseNeuron::
-~C_BaseNeuron()
-{
-        if ( M )
-                M->vp( 4, "  deleting base neuron \"%s\"\n", _label);
-
-      // kill all efferents
-        for ( auto Y = _axonal_harbour.rbegin(); Y != _axonal_harbour.rend(); ++Y ) {
-                (*Y) -> _source = nullptr;
-                delete (*Y);
-        }
-      // unlink ourselves from all afferents
-        for ( auto Y = _dendrites.rbegin(); Y != _dendrites.rend(); ++Y )
-                Y->first->_targets.remove( this);
-
-        if ( _spikelogger_agent ) {
-                if ( M && !(_spikelogger_agent->_status & CN_KL_IDLE) )
-                        M->unregister_spikelogger( this);
-                delete _spikelogger_agent;
-                _spikelogger_agent = nullptr;
-        }
-}
-
-
-
-
-// --- SSpikeloggerService
-
-double
-cnrun::SSpikeloggerService::
-sdf( double at, double sample_width, double sigma, size_t *nspikes) const
-{
-        if ( nspikes )
-                *nspikes = 0;
-
-        double  dt,
-                result = 0.;
-        for ( auto &T : spike_history ) {
-                dt = T - at;
-                if ( dt < -sample_width/2. )
-                        continue;
-                if ( dt >  sample_width/2. )
-                        break;
-                if ( nspikes )
-                        ++(*nspikes);
-                result += exp( -dt*dt/(sigma * sigma));
-        }
-        return result;
-}
-
-
-double
-cnrun::SSpikeloggerService::
-shf( double at, double sample_width) const
-{
-        double  dt,
-                last_spike_at;
-        vector<double>
-                intervals;
-        bool    counted_one = false;
-        for ( auto &T : spike_history ) {
-                dt = T - at;
-                if ( dt < -sample_width/2. )
-                        continue;
-                if ( dt >  sample_width/2. )
-                        break;
-
-                if ( counted_one )
-                        intervals.push_back( last_spike_at - T);
-                else
-                        counted_one = true;
-
-                last_spike_at = T;
-        }
-
-        return (intervals.size() < 3)
-                ? 0
-                : gsl_stats_sd( intervals.data(), 1, intervals.size());
-}
-
-
-size_t
-cnrun::SSpikeloggerService::
-get_sxf_vector_custom( vector<double> *sdf_buffer, vector<double> *shf_buffer,
-                       vector<size_t> *nspikes_buffer,
-                       double sample_period_custom, double sigma_custom,
-                       double from, double to) const
-{
-        if ( to == 0. )
-                to = _client->M->model_time();
-
-        if ( sdf_buffer )
-                sdf_buffer->clear();
-        if ( shf_buffer )
-                shf_buffer->clear();
-        if ( nspikes_buffer )
-                nspikes_buffer->clear();
-
-        for ( double t = from; t <= to; t += sample_period_custom ) {
-                size_t  nspikes = 0;
-                double  sdf_value = sdf(
-                        t, sample_period_custom,
-                        sigma_custom, &nspikes);
-                if ( sdf_buffer )
-                        sdf_buffer->push_back( sdf_value);
-                if ( shf_buffer )
-                        shf_buffer->push_back( shf( t, sample_period_custom));
-                if ( nspikes_buffer )
-                        nspikes_buffer->push_back( nspikes);
-        }
-
-        return (to - from) / sample_period_custom;
-}
-
-
-void
-cnrun::SSpikeloggerService::
-sync_history() const
-{
-        if ( !_client->M || (_client->M && _client->M->is_diskless) )
-                return;
-
-        ofstream spikecnt_strm( (string(_client->_label) + ".spikes").c_str());
-        spikecnt_strm.precision( _client->precision);
-        spikecnt_strm << "#spike time\n";
-
-        for ( auto &V : spike_history )
-                spikecnt_strm << V << endl;
-
-        if ( _status & CN_KL_COMPUTESDF ) {
-                ofstream sdf_strm( (string(_client->_label) + ".sxf").c_str());
-                sdf_strm.precision( _client->precision);
-                sdf_strm << "#<time>\t<sdf>\t<shf>\t<nspikes>\n";
-
-                vector<double> sdf_vector, shf_vector;
-                vector<size_t> nspikes_vector;
-                get_sxf_vector( &sdf_vector, &shf_vector, &nspikes_vector,
-                                start_delay, 0);
-
-                double t = start_delay;
-                for ( size_t i = 0; i < sdf_vector.size(); ++i, t += sample_period )
-                        sdf_strm << t << "\t"
-                                 << sdf_vector[i] << "\t"
-                                 << shf_vector[i] << "\t"
-                                 << nspikes_vector[i] << endl;
-        }
-}
-
-
-size_t
-cnrun::SSpikeloggerService::
-n_spikes_since( double since) const
-{
-        size_t i = 0;
-        for ( auto &K : spike_history )
-                if ( K > since )
-                        return spike_history.size() - i++;
-        return 0;
-}
-
-
-
-// ----- CSynapse
-
-cnrun::C_BaseSynapse::
-C_BaseSynapse( TUnitType intype,
-               C_BaseNeuron *insource, C_BaseNeuron *intarget,
-               double ing, CModel *inM, int s_mask)
-      : C_BaseUnit (intype, "overwrite-me", inM, s_mask),
-        _source (insource),
-        t_last_release_started (-INFINITY)
-{
-        if ( M )
-                M->vp( 5, "Creating a \"%s\" base synapse\n", species());
-        _targets.push_back( intarget);
-        intarget->_dendrites[this] = ing;
-        _source->_axonal_harbour.push_back( this);
-        snprintf( _label, max_label_size-1, "%s:1", _source->_label);
-}
-
-
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseSynapse::
-clone_to_target( C_BaseNeuron *tgt, double g)
-{
-      // check if we have no existing connection already to tgt
-        if ( member( tgt, _targets) ) {
-                M->vp( 1, stderr, "Neuron \"%s\" already synapsing onto \"%s\"\n",
-                       _source->_label, tgt->_label);
-                return nullptr;
-        }
-
-        tgt -> _dendrites[this] = g;
-        _targets.push_back( tgt);
-
-        snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
-
-        return this;
-}
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::C_BaseSynapse::
-make_clone_independent( C_BaseNeuron *tgt)
-{
-        double g = g_on_target( *tgt);
-        if ( !isfinite(g) || !M )
-                return nullptr;
-
-        if ( M )
-                M->vp( 4, "promoting a clone of %s synapse from \"%s\" to \"%s\"\n",
-                       species(), _label, tgt->_label);
-        // if ( unlikely (member( tgt, _targets)) )
-        //         fprintf( stderr, "ебать!\n");
-        _targets.remove( tgt);
-
-        // if ( unlikely (member( this, tgt->_dendrites)) )
-        //         fprintf( stderr, "ебать-колотить!\n");
-        tgt -> _dendrites.erase( this);
-
-        snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
-
-        C_BaseSynapse* ret = M -> add_synapse_species(
-                _type, _source, tgt, g,
-                CModel::TSynapseCloningOption::no /* prevents re-creation of a clone we have just excised */,
-                TIncludeOption::is_last);
-        // the newly added synapse has stock paramaters yet: copy ours
-        if ( ret ) {
-                ret->P = P;
-                // also see to vars
-                for ( size_t i = 0; i < v_no(); ++i )
-                        ret->var_value(i) = get_var_value(i);
-                return ret;
-        }
-        return nullptr;
-}
-
-
-
-
-
-
-void
-cnrun::C_BaseSynapse::
-dump( bool with_params, FILE *strm) const
-{
-        C_BaseUnit::dump( with_params);
-        fprintf( strm, "  gsyn on targets (%zu):  ", _targets.size());
-        for ( auto &T : _targets )
-                fprintf( strm, "%s: %g;  ", T->_label, g_on_target( *T));
-        fprintf( strm, "\n\n");
-}
-
-
-
-
-
-cnrun::C_BaseSynapse::
-~C_BaseSynapse()
-{
-        if ( M )
-                M->vp( 4, "  deleting base synapse \"%s\"\n", _label);
-
-        for ( auto &T : _targets )
-                if ( T )
-                        T->_dendrites.erase( this);
-
-        if ( _source ) {
-                _source->_axonal_harbour.remove( this);
-                if ( M )
-                        M->vp( 5, "    removing ourselves from \"%s\" axonals (%zu still there)\n",
-                               _source->_label, _source->_axonal_harbour.size());
-        }
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/base-unit.hh b/upstream/src/libcnrun/base-unit.hh
deleted file mode 100644 (file)
index 21bfbd4..0000000
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *       File name:  libcn/base-unit.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2008-08-02
- *
- *         Purpose:  unit base class
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_BASEUNIT_H_
-#define CNRUN_LIBCN_BASEUNIT_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <fstream>
-#include <cstring>
-#include <vector>
-#include <list>
-
-#include "libstilton/lang.hh"
-#include "libstilton/string.hh"
-#include "forward-decls.hh"
-#include "types.hh"
-#include "sources.hh"
-
-
-using namespace std;
-using cnrun::stilton::str::sasprintf;
-
-namespace cnrun {
-
-namespace global {
-extern unsigned short precision;
-extern int verbosely;
-}
-
-// for all units
-#define CN_UERROR                        (1 << 0)
-#define CN_UOWNED                        (1 << 1)
-#define CN_UHASPARAMRANGE                (1 << 2)
-#define CN_ULISTENING_MEM                (1 << 3)
-#define CN_ULISTENING_DISK               (1 << 4)
-#define CN_ULISTENING_1VARONLY           (1 << 5)
-#define CN_ULISTENING_DEFERWRITE         (1 << 6)
-#define CN_ULISTENING_BINARY             (1 << 7)
-//#define CN_NDYNPARAMS                        (1 << 8)
-
-// only for neurons
-#define CN_NFIRING                       (1 <<  9)  // firing now
-#define CN_NREFRACT                      (1 << 10)  // in refractory phase now
-
-
-// the base unit provides the methods for the following:
-// * classification;
-// * access to parameters, tape reader and range interface;
-// * attachment to the mother model;
-// * listening, i.e., keeping a history of vars along a timeline;
-class C_BaseUnit {
-
-        DELETE_DEFAULT_METHODS (C_BaseUnit)
-
-        friend class CModel;
-        friend class SSpikeloggerService;
-
-    public:
-        static const constexpr size_t max_label_size = 40;
-
-    protected:
-        C_BaseUnit (TUnitType, const string& label,
-                    CModel*, int s_mask);
-    public:
-        virtual ~C_BaseUnit();  // surely virtual
-
-      // written variables precision
-        unsigned short precision;
-
-        int     status() const  {  return _status; }
-        TUnitType type() const  {  return _type;   }
-
-      // classification
-        int  traits()        const {  return __CNUDT[_type].traits;                  }
-        bool is_hostable()   const {  return __CNUDT[_type].traits & UT_HOSTED;      }
-        bool is_ddtbound()   const {  return __CNUDT[_type].traits & UT_DDTSET;      }
-        bool is_neuron()     const {  return _type >= NT_FIRST && _type <= NT_LAST;  }
-        bool is_synapse()    const {  return _type >= YT_FIRST && _type <= YT_LAST;  }
-        bool is_oscillator() const {  return __CNUDT[_type].traits & UT_OSCILLATOR;  }
-        bool is_conscious()  const {  return is_oscillator();                        }
-
-        unsigned long serial() const
-                {  return _serial_id;  }
-        const char *label() const  // for synapses, it is "%s:%d", src->label, targets.size()
-                {  return _label;  }
-        void set_label( const string& new_label)
-                {  strncpy( _label, new_label.c_str(), max_label_size-1); }
-
-        const char *class_name() const
-                {  return is_neuron() ? "Neuron" : "Synapse";  }
-        const char *species() const
-                {  return __CNUDT[_type].species;              }
-        const char *family() const
-                {  return __CNUDT[_type].family;               }
-        const char *type_description() const
-                {  return __CNUDT[_type].description;          }
-
-      // parent model
-        const CModel&
-        parent_model() const        { return *M; }
-        double
-        model_time() const;  // defined in model.h
-
-        bool is_owned() const       { return _status & CN_UOWNED; }
-
-      // parameter & variable names and symbols
-        const char *const param_name( size_t i)       const { return __CNUDT[_type].stock_param_names[i]; }
-        const char *const param_sym( size_t i)        const { return __CNUDT[_type].stock_param_syms[i];  }
-        int param_idx_by_sym( const string&) const __attribute__ ((pure));
-
-        const char *const var_name( size_t i)         const { return __CNUDT[_type].stock_var_names[i];   }
-        const char *const var_sym( size_t i)          const { return __CNUDT[_type].stock_var_syms[i];    }
-        int var_idx_by_sym( const string&) const __attribute__ ((pure));
-
-        unsigned short v_no() const        { return __CNUDT[_type].vno; }
-        unsigned short p_no() const        { return __CNUDT[_type].pno; }
-
-      // purity checks
-        bool is_not_altered() const
-                {
-                        return (memcmp( P.data(), __CNUDT[_type].stock_param_values,
-                                       sizeof (double) * p_no()) == 0) &&
-                                !has_sources();
-                }
-        bool has_same_params( const C_BaseUnit &rv) const
-                {
-                        return _type == rv._type &&
-                                memcmp( P.data(), rv.P.data(), sizeof (double) * p_no()) == 0;
-                }
-        bool has_sources() const __attribute__ ((pure))
-                {
-                        return not _sources.empty();
-                }
-        bool has_same_sources( const C_BaseUnit &rv) const __attribute__ ((pure))
-                {
-                        return _sources == rv._sources;
-                        // not sure taking the order of otherwise identical sources should matter
-                }
-        bool is_identical( const C_BaseUnit &rv) const __attribute__ ((pure))
-                {
-                        return _type == rv._type && has_same_params(rv) &&
-                                ((has_sources() && has_same_sources(rv)) ||
-                                 (!has_sources() && !rv.has_sources()));
-                }
-
-      // parameters
-        double
-        get_param_value( size_t p) const
-                {  return P[p];  }
-
-        double
-        get_param_value( const string& sym) const
-                {
-                        int id = param_idx_by_sym( sym);
-                        if ( unlikely (id == -1) )
-                                throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"", sym.c_str(), _label);
-                        return P[id];
-                }
-
-        double&
-        param_value( size_t p)
-                {
-                        return P[p];
-                }
-
-        double&
-        param_value( const string& sym)
-                {
-                        int id = param_idx_by_sym( sym);
-                        if ( unlikely (id == -1) )
-                                throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"",
-                                                 sym.c_str(), _label);
-                        return P[id];
-                }
-
-        void
-        reset_params()
-                {
-                        P.resize( p_no());
-                        memcpy( P.data(), __CNUDT[_type].stock_param_values,
-                                sizeof(double) * p_no());
-                        param_changed_hook();
-                }
-
-      // variables: differs per hosted or standalone
-        virtual double &var_value( size_t) = 0;
-        virtual const double &get_var_value( size_t) const = 0;
-        virtual void reset_vars() = 0;
-        virtual void reset_state();
-
-        virtual void dump( bool with_params = false, FILE *strm = stdout) const;
-
-      // state history
-        bool is_listening() const
-                {
-                        return _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM);
-                }
-        void start_listening( int mask = 0 | CN_ULISTENING_DISK);
-        void stop_listening();
-        void restart_listening()
-                {
-                        int lbits = _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM
-                                               | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE);
-                        stop_listening();
-                        start_listening( lbits);
-                }
-        void pause_listening();
-        void resume_listening();
-
-        void tell();
-
-        const vector<double>*
-        listener_mem() const
-                { return _listener_mem; }
-
-      // source interface
-        enum TSinkType { SINK_PARAM, SINK_VAR };
-
-        template <class T>
-        struct SSourceInterface {
-            friend class C_BaseUnit;
-            friend class CModel;
-            private:
-                C_BaseSource *source;
-                TSinkType sink_type;
-                unsigned short idx;
-
-                SSourceInterface (T *insource, TSinkType insink_type, unsigned short inidx)
-                      : source (insource), sink_type (insink_type), idx (inidx)
-                        {}
-            public:
-                bool operator== ( const SSourceInterface &rv) const
-                        {
-                                return  source    == rv.source &&
-                                        sink_type == rv.sink_type &&
-                                        idx       == rv.idx;
-                        }
-        };
-        template <class T>
-        void attach_source( T *s, TSinkType t, unsigned short idx);
-        void detach_source( C_BaseSource*, TSinkType, size_t idx);
-
-        void apprise_from_sources();
-        virtual void param_changed_hook()
-                {}
-
-    protected:
-        TUnitType
-                _type;  // will look up p, pno and vno from __CNUDT using _type as index
-        int     _status;
-
-        unsigned long
-                _serial_id;  // assigned incrementally as read by import_NetworkML
-        char    _label[max_label_size];
-
-        CModel  *M;
-
-      // private copy of params
-        vector<double> P;
-
-        list<SSourceInterface<C_BaseSource>>
-                _sources;
-
-    private:
-      // where vars are written by tell()
-        int _binwrite_handle;
-        ofstream *_listener_disk;
-      // ... and/or stored, in a diskless model
-        vector<double> *_listener_mem;
-};
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/forward-decls.hh b/upstream/src/libcnrun/forward-decls.hh
deleted file mode 100644 (file)
index 07ae859..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *       File name:  libcn/forward-decls.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- * Initial version:  2014-09-16
- *
- *         Purpose:  forward declarations
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_FORWARDDECLS_H_
-#define CNRUN_LIBCN_FORWARDDECLS_H_
-
-namespace cnrun {
-
-class C_BaseUnit;
-class C_BaseNeuron;
-class C_BaseSynapse;
-class C_HostedNeuron;
-class C_HostedSynapse;
-class C_StandaloneNeuron;
-class C_StandaloneSynapse;
-
-class C_HostedConductanceBasedNeuron;
-class C_HostedRateBasedNeuron;
-
-class CNeuronMap;
-class CSynapseMap;
-
-class CModel;
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/hosted-attr.hh b/upstream/src/libcnrun/hosted-attr.hh
deleted file mode 100644 (file)
index 84cc6f5..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *       File name:  libcn/hosted-attr.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2009-03-31
- *
- *         Purpose:  Interface class containing hosted unit attributes.
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_HOSTEDATTR_H_
-#define CNRUN_LIBCN_HOSTEDATTR_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include "libstilton/lang.hh"
-#include <vector>
-
-
-using namespace std;
-
-namespace cnrun {
-
-class C_HostedAttributes {
-
-        friend class CIntegrateRK65;
-        friend class CModel;
-
-    protected:
-      // variables for units in the model are catenated on a single
-      // vector<double>, as an essential optimization measure; each
-      // unit knows its own set of variables by this idx:
-        size_t idx;
-      // the containing model provides idx on registering our unit
-
-    public:
-        virtual void reset_vars() = 0;
-        virtual double &var_value( size_t) = 0;
-
-        virtual void derivative( vector<double>&, vector<double>&) = 0;
-};
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/hosted-neurons.cc b/upstream/src/libcnrun/hosted-neurons.cc
deleted file mode 100644 (file)
index acb7540..0000000
+++ /dev/null
@@ -1,766 +0,0 @@
-/*
- *       File name:  libcn/hosted-neurons.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2008-10-16
- *
- *         Purpose:  hosted neuron classes (those having their
- *                   state vars on parent model's integration vectors)
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <cmath>
-#include <iostream>
-
-#include "libstilton/lang.hh"
-
-#include "types.hh"
-#include "model.hh"
-
-
-cnrun::C_HostedNeuron::
-C_HostedNeuron (TUnitType intype, const string& inlabel,
-                double inx, double iny, double inz,
-                CModel* inM, int s_mask,
-                TIncludeOption include_option)
-      : C_BaseNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
-{
-        if ( M )
-                M->include_unit( this, include_option);
-        else {
-//                _status &= ~CN_UENABLED;
-                idx = (unsigned long)-1;
-        }
-}
-
-
-
-
-
-void
-cnrun::C_HostedConductanceBasedNeuron::
-do_detect_spike_or_whatever()
-{
-        if ( unlikely (E() >= M->options.spike_threshold) ) {
-                if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) ) {
-                        _spikelogger_agent->spike_history.push_back(
-                                _spikelogger_agent->t_last_spike_start = model_time());
-                        _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
-                }
-        } else
-//                if ( model_time() - t_last_spike_end > M->spike_lapse ) {
-                if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
-                        _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
-                        _spikelogger_agent->t_last_spike_end = model_time();
-                }
-}
-
-
-
-
-
-
-
-
-// SPECIFIC NEURONS:
-
-// ===== HH and variations
-
-const char* const cnrun::CN_ParamNames_NeuronHH_d[] = {
-        "Na conductance, " CN_PU_CONDUCTANCE,
-        "Na equi potential, " CN_PU_POTENTIAL,
-        "K conductance, " CN_PU_CONDUCTANCE,
-        "K equi potential, " CN_PU_POTENTIAL,
-        "Leak conductance, " CN_PU_CONDUCTANCE,
-        "Leak equi potential, " CN_PU_POTENTIAL,
-        "Membrane specific capacitance, " CN_PU_CAPACITY_DENSITY,
-
-        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
-        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
-        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
-
-        "Externally applied DC, " CN_PU_CURRENT,
-};
-const char* const cnrun::CN_ParamSyms_NeuronHH_d[] = {
-        "gNa",
-        "ENa",
-        "gK",
-        "EK",
-        "gl",
-        "El",
-        "Cmem",
-
-        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
-        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
-        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
-
-        "Idc",
-};
-const double cnrun::CN_Params_NeuronHH_d[] = {
-        7.15,   //   gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,    //   ENa: Na equi potential in mV
-        1.430,  //   gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,    //   EK: K equi potential in mV
-        0.0267, //   gl: leak conductance in 1/(mOhms * cm^2)
-      -63.563,  //   El: leak equi potential in mV
-        0.143,  //   Cmem: membr. specific capacitance, muF/cm^2
-
-        0.32,   52.,   4.,
-        0.28,   25.,   5.,
-        0.128,  48.,  18.,
-        4.0,    25.,   5.,
-        0.032,  50.,   5.,
-        0.5,    55.,  40.,
-
-          0.                // Externally applied constant current
-};
-
-
-
-
-const double cnrun::CN_Vars_NeuronHH_d[] = {
-        -66.81,         // 0 - membrane potential E
-          0.023,        // 1 - prob. for Na channel activation m
-          0.800,        // 2 - prob. for not Na channel blocking h
-          0.220,        // 3 - prob. for K channel activation n
-};
-
-const char* const cnrun::CN_VarNames_NeuronHH_d[] = {
-        "Membrane potential, " CN_PU_POTENTIAL,
-        "Prob. of Na channel activation",
-        "1-Prob. of Na channel blocking",
-        "Prob. of K channel activation",
-};
-const char* const cnrun::CN_VarSyms_NeuronHH_d[] = {
-        "E",
-        ".m",
-        ".h",
-        ".n"
-};
-
-
-
-void
-__attribute__ ((hot))
-cnrun::CNeuronHH_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-      // differential eqn for E, the membrane potential
-        dE(dx) = (
-                     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
-                   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
-                   + P[gl]                           * (P[El]  - E(x)) + (Isyn(x) + P[Idc])
-                  ) / P[Cmem];
-
-        double _a, _b, K;
-      // diferential eqn for m, the probability for one Na channel activation
-      // particle
-        K = -P[alpha_m_b] - E(x),
-                _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-//        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-        K =  P[beta_m_b] + E(x),
-                _b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
-//        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-        dm(dx) = _a * (1 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking
-      // particle to be absent
-        K = -P[alpha_h_b] - E(x),
-                _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-//        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-        K = -P[beta_h_b] - E(x),
-                _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-//        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-        dh(dx) = _a * (1 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation
-      // particle
-        K = -P[alpha_n_b] - E(x),
-                _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-//        _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
-        K = -P[beta_n_b] - E(x),
-                _b = P[beta_n_a] * exp( K / P[beta_n_c]);
-//        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-        dn(dx)= _a * (1 - n(x)) -_b * n(x);
-}
-
-// void
-// CNeuronHH::derivative( vector<double>& x, vector<double>& dx)
-// {
-//        enum TParametersNeuronHH {
-//                gNa, ENa, gK,  EK, gl, El, Cmem, Idc
-//        };
-
-//       // differential eqn for E, the membrane potential
-//        dE(dx) = (
-//                   P[gNa] * ___pow3(m(x)) * h(x) * (P[ENa] - E(x))
-//                 + P[gK]  * ___pow4(n(x))        * (P[EK]  - E(x))
-//                 + P[gl]  *                        (P[El]  - E(x))  + (Isyn(x) + P[Idc])
-//                 ) / P[Cmem];
-
-//        double _a, _b;
-//       // diferential eqn for m, the probability for Na channel activation
-//        _a = (3.5 + 0.1 * E(x)) / -expm1( -3.5 - 0.1 * E(x));
-//        _b = 4.0 * exp( -(E(x) + 60.0) / 18.0);
-//        dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
-
-//       // differential eqn for h, the probability for Na channel inactivation
-//        _a = 0.07 * exp( -E(x) / 20.0 - 3.0);
-//        _b = 1.0 / (exp( -3.0 - 0.1 * E(x)) + 1.0);
-//        dh(dx) = _a * (1.0 - h(x)) -_b * h(x);
-
-//       // differential eqn for n, the probability for K channel activation
-//        _a = (-0.5 - 0.01 * E(x)) / expm1( -5.0 - 0.1 * E(x));
-//        _b = 0.125 * exp( -(E(x) + 60.0) / 80.0);
-//        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-// }
-
-
-
-
-
-
-
-
-const char* const cnrun::CN_ParamNames_NeuronHH2_d[] = {
-        "Na conductance, " CN_PU_CONDUCTANCE,
-        "Na equi potential, " CN_PU_POTENTIAL,
-        "K conductance, " CN_PU_CONDUCTANCE,
-        "K equi potential, " CN_PU_POTENTIAL,
-        "Leak conductance, " CN_PU_CONDUCTANCE,
-        "Leak equi potential, " CN_PU_POTENTIAL,
-        "Membrane specific capacitance, " CN_PU_CAPACITY_DENSITY,
-        "K leakage conductance, " CN_PU_CONDUCTANCE,
-        "K leakage equi potential, " CN_PU_POTENTIAL,
-
-        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
-        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
-        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
-
-//        "Total equi potential (?), " CN_PU_POTENTIAL,
-
-        "Externally applied DC, " CN_PU_CURRENT,
-};
-const char* const cnrun::CN_ParamSyms_NeuronHH2_d[] = {
-        "gNa",
-        "ENa",
-        "gK",
-        "EK",
-        "gl",
-        "El",
-        "Cmem",
-        "gKl",
-        "EKl",
-
-        ".alpha_m_a",        ".alpha_m_b",        ".alpha_m_c",        ".beta_m_a",        ".beta_m_b",        ".beta_m_c",
-        ".alpha_h_a",        ".alpha_h_b",        ".alpha_h_c",        ".beta_h_a",        ".beta_h_b",        ".beta_h_c",
-        ".alpha_n_a",        ".alpha_n_b",        ".alpha_n_c",        ".beta_n_a",        ".beta_n_b",        ".beta_n_c",
-
-//        "V0",
-
-        "Idc",
-};
-const double cnrun::CN_Params_NeuronHH2_d[] = {
-        7.15,    //   gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,     //   ENa: Na equi potential in mV
-        1.43,    //   gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,     //   EK: K equi potential in mV
-        0.0267,  //   gl: leak conductance in 1/(mOhms * cm^2)
-      -63.56,    //   El: leak equi potential in mV
-        0.143,   //   Cmem: membr. specific capacitance, muF/cm^2
-        0.00572, //   gKl: potassium leakage conductivity
-      -95.0,     //   EKl: potassium leakage equi pot in mV
-
-        0.32,   52.,   4.,
-        0.28,   25.,   5.,
-        0.128,  48.,  18.,
-        4.0,    25.,   5.,
-        0.032,  50.,   5.,
-        0.5,    55.,  40.,
-
-//       65.0,                //   V0: ~ total equi potential (?)
-
-        0.,                //   Idc: constant, externally applied current
-};
-
-
-const double cnrun::CN_Vars_NeuronHH2_d[] = {
-// as in a single-neuron run
-      -66.56,   // 0 - membrane potential E
-        0.0217, // 1 - prob. for Na channel activation m
-        0.993,  // 2 - prob. for not Na channel blocking h
-        0.051,  // 3 - prob. for K channel activation n
-
-// previously thought to be resting state values
-//      -60.0,              // 0 - membrane potential E
-//        0.0529324,        // 1 - prob. for Na channel activation m
-//        0.3176767,        // 2 - prob. for not Na channel blocking h
-//        0.5961207,        // 3 - prob. for K channel activation n
-};
-
-
-
-
-
-void
-cnrun::CNeuronHH2_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        enum TParametersNeuronHH2 {
-                gNa, ENa, gK,  EK, gl, El, Cmem,
-                gKl, EKl, //V0,
-                alpha_m_a,        alpha_m_b,        alpha_m_c,
-                beta_m_a,        beta_m_b,        beta_m_c,
-                alpha_h_a,        alpha_h_b,        alpha_h_c,
-                beta_h_a,        beta_h_b,        beta_h_c,
-                alpha_n_a,        alpha_n_b,        alpha_n_c,
-                beta_n_a,        beta_n_b,        beta_n_c,
-                Idc,
-        };
-
-      // differential eqn for E, the membrane potential
-        dE(dx) = (
-                     P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
-                   + P[gK]  * gsl_pow_4(n(x))        * (P[EK]  - E(x))
-                   + P[gl]                           * (P[El]  - E(x))
-                   + P[gKl]                          * (P[EKl] - E(x)) + (Isyn(x) + P[Idc])
-                  ) / P[Cmem];
-
-        double _a, _b, K;
-      // diferential eqn for m, the probability for one Na channel activation
-      // particle
-        K = -P[alpha_m_b] - E(x),
-                _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-//        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-        K =  P[beta_m_b] + E(x),
-                _b = P[beta_m_a]  * K / expm1( K / P[beta_m_c]);
-//        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-        dm(dx) = _a * (1 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking
-      // particle to be absent
-        K = -P[alpha_h_b] - E(x),
-                _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-//        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-        K = -P[beta_h_b] - E(x),
-                _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-//        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-        dh(dx) = _a * (1 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation
-      // particle
-        K = -P[alpha_n_b] - E(x),
-                _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-//        _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
-        K = -P[beta_n_b] - E(x),
-                _b = P[beta_n_a] * exp( K / P[beta_n_c]);
-//        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-        dn(dx)= _a * (1 - n(x)) -_b * n(x);
-}
-
-
-
-
-
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-
-const char* const cnrun::CN_ParamNames_NeuronEC_d[] = {
-        "Na conductance, " CN_PU_CONDUCTANCE,
-        "Na equi potential, " CN_PU_POTENTIAL,
-        "K conductance, " CN_PU_CONDUCTANCE,
-        "K equi potential, " CN_PU_POTENTIAL,
-        "Leak conductance, " CN_PU_CONDUCTANCE,
-        "Leak equi potential, " CN_PU_POTENTIAL,
-        "Membrane capacity density, " CN_PU_CAPACITY_DENSITY,
-        "Externally applied DC, " CN_PU_CURRENT,
-        "K leakage conductance, " CN_PU_CONDUCTANCE,
-        "K leakage equi potential, " CN_PU_POTENTIAL,
-        "Total equi potential, " CN_PU_POTENTIAL,
-        "gh1",
-        "gh2",
-        "Vh, " CN_PU_POTENTIAL
-};
-const char* const cnrun::CN_ParamSyms_NeuronEC_d[] = {
-        "gNa",
-        "ENa",
-        "gK",
-        "EK",
-        "gl",
-        "El",
-        "Cmem",
-        "Idc",
-        "gKl",
-        "EKl",
-        "V0",
-        "gh1",
-        "gh2",
-        "Vh"
-};
-const double cnrun::CN_Params_NeuronEC_d[] = {
-        7.15,   //  0 - gNa: Na conductance in 1/(mOhms * cm^2)
-       50.0,    //  1 - ENa: Na equi potential in mV
-        1.43,   //  2 - gK: K conductance in 1/(mOhms * cm^2)
-      -95.0,    //  3 - EK: K equi potential in mV
-        0.021,  //  4 - gl: leak conductance in 1/(mOhms * cm^2)
-      -55.0,    //  5 - El: leak equi potential in mV
-        0.286,  //  6 - Cmem: membr. capacity density in muF/cm^2 // 0.143
-        0.,     //  7 - Externally applied constant current
-        0.035,  //  8 - gKl: potassium leakage conductivity
-      -95.0,    //  9 - EKl: potassium leakage equi pot in mV
-       65.0,    // 10 - V0: ~ total equi potential (?)
-        0.0185, // 11 - gh1 // 1.85
-        0.01,   // 12 - gh2
-      -20.0,    // 13 - Vh
-};
-
-const char* const cnrun::CN_VarNames_NeuronEC_d[] = {
-        "Membrane potential",
-        "Prob. of Na channel activation",
-        "Prob. of not Na channel blocking",
-        "Prob. of K channel activation",
-        "Ih1 activation",
-        "Ih2 activation"
-};
-const char* const cnrun::CN_VarSyms_NeuronEC_d[] = {
-        "E",
-        ".m",
-        ".h",
-        ".n",
-        ".Ih1",
-        ".Ih2"
-};
-const double cnrun::CN_Vars_NeuronEC_d[] = {
-      -64.1251,    // 0 - membrane potential E
-        0.0176331, // 1 - prob. for Na channel activation m
-        0.994931,  // 2 - prob. for not Na channel blocking h
-        0.0433969, // 3 - prob. for K channel activation n
-        0.443961,  // 4 - Ih1 activation
-        0.625308   // 5 - Ih2 activation
-};
-
-
-
-
-#define _xfunc(a,b,k,V)  ((a) * (V) + (b)) / (1.0 - exp(((V)+(b)/(a))/(k)))
-
-void
-cnrun::CNeuronEC_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        enum TParametersNeuronEC {
-                gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
-                gKl, EKl, V0,
-                gh1, gh2,
-                Vh
-        };
-
-        double _a, _b;
-      // differential eqn for E, the membrane potential
-        dE(dx) = -(gsl_pow_3( m(x)) * h(x) * P[gNa] * (E(x) - P[ENa]) +
-                 gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
-                 (Ih1(x) * P[gh1] + Ih2(x) * P[gh2]) * (E(x) - P[Vh])+
-                 P[gl] * (E(x) - P[El]) + P[gKl] * (E(x) - P[EKl]) - Isyn(x)) / P[Cmem];
-
-      // diferential eqn for m, the probability for one Na channel activation particle
-        _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
-        _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
-        dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking particle to be absent
-        _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
-        _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
-        dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation particle
-        _a = 0.032 * (15.0 - E(x) - P[V0]) / expm1( (15.0 - E(x) - P[V0]) / 5.0);
-        _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
-        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-
-      // differential equation for the Ih1 activation variable
-        _a = _xfunc (-2.89e-3, -0.445,  24.02, E(x));
-        _b = _xfunc ( 2.71e-2, -1.024, -17.40, E(x));
-        dIh1(dx) = _a * (1.0 - Ih1(x)) - _b * Ih1(x);
-
-      // differential equation for the Ih2 activation variable
-        _a = _xfunc (-3.18e-3, -0.695,  26.72, E(x));
-        _b = _xfunc ( 2.16e-2, -1.065, -14.25, E(x));
-        dIh2(dx) = _a * (1.0 - Ih2(x)) - _b * Ih2(x);
-}
-
-#undef _xfunc
-
-
-
-
-
-
-
-
-
-
-
-
-const char* const cnrun::CN_ParamNames_NeuronECA_d[] = {
-        "Na conductance, " CN_PU_CONDUCTANCE,
-        "Na equi potential, " CN_PU_POTENTIAL,
-        "K conductance, " CN_PU_CONDUCTANCE,
-        "K equi potential, " CN_PU_POTENTIAL,
-        "Leak conductance, " CN_PU_CONDUCTANCE,
-        "Leak equi potential, " CN_PU_POTENTIAL,
-        "Membrane capacity density, " CN_PU_CAPACITY_DENSITY,
-        "Externally applied DC, " CN_PU_CURRENT,
-        "gNap",
-        "gh",
-        "Vh",
-};
-const char* const cnrun::CN_ParamSyms_NeuronECA_d[] = {
-        "gNa",
-        "ENa",
-        "gK",
-        "EK",
-        "gl",
-        "El",
-        "Cmem",
-        "Idc",
-        "gNap",
-        "gh",
-        "Vh",
-};
-const double cnrun::CN_Params_NeuronECA_d[] = {
-        52.0,        //  0 - Na conductance in 1/(mOhms * cm^2)
-        55.0,        //  1 - Na equi potential in mV
-        11.0,        //  2 - K conductance in 1/(mOhms * cm^2)
-       -90.0,        //  3 - K equi potential in mV
-         0.5,        //  4 - Leak conductance in 1/(mOhms * cm^2)
-       -65.0,        //  5 - Leak equi potential in mV
-         1.5,        //  6 - Membr. capacity density in muF/cm^2
-         0.,         //  7 - Externally applied constant current
-         0.5,        //  8 - gNap
-         1.5,        //  9 - gh
-       -20.0,        // 10 - Vh
-};
-
-const char* const cnrun::CN_VarNames_NeuronECA_d[] = {
-        "Membrane potential",
-        "Prob. of Na channel activation",
-        "Prob. of Na channel blocking",
-        "Prob. of K channel activation",
-        "mNap",
-        "Ih1 activation",
-        "Ih2 activation"
-};
-const char* const cnrun::CN_VarSyms_NeuronECA_d[] = {
-        "E",
-        ".m",
-        ".h",
-        ".n",
-        ".mNap",
-        ".Ih1",
-        ".Ih2"
-};
-const double cnrun::CN_Vars_NeuronECA_d[] = {
-      -53.77902178,    // E
-        0.0262406368,  // prob. for Na channel activation m
-        0.9461831106,  // prob. for not Na channel blocking h
-        0.1135915933,  // prob. for K channel activation n
-        0.08109646237, // Nap
-        0.06918464221, // Ih1 activation
-        0.09815937825  // Ih2 activation
-};
-
-
-
-void
-cnrun::CNeuronECA_d::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        enum TParametersNeuronECA {  // lacks SParametersNeuronEC's gKl and EKl, so derives directly from HH
-                gNa, ENa, gK,  EK, gl, El, Cmem, Idc,
-                gNap, gh,
-                Vh
-        };
-
-      // differential eqn for E, the membrane potential
-        dE(dx) = -((gsl_pow_3( m(x)) * h(x) * P[gNa] + P[gNap] * mNap(x)) * (E(x) - P[ENa]) +
-                   gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
-                   P[gh] * (Ih1(x) * 0.65 + Ih2(x) * 0.35) * (E(x) - P[Vh]) +
-                   P[gl] * (E(x) - P[El]) - (Isyn(x) + P[Idc]) + 2.85) / P[Cmem];
-
-        double _a, _b;
-      // diferential eqn for m, the probability for one Na channel activation particle
-        _a = -0.1 * (E(x) + 23) / expm1( -0.1 * (E(x) + 23));
-        _b =  4.  * exp( -(E(x) + 48) / 18);
-        dm(dx) = _a * (1. - m(x)) - _b * m(x);
-
-      // differential eqn for h, the probability for the Na channel blocking particle to be absent
-        _a = 0.07 * exp( -(E(x) + 37.0) / 20.0);
-        _b = 1. / (exp( -0.1 * (E(x) + 7.)) + 1.0);
-        dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
-
-      // differential eqn for n, the probability for one K channel activation particle
-        _a = -0.01  * (E(x) + 27) / expm1( -0.1 * (E(x) + 27));
-        _b =  0.125 * exp( -(E(x) + 37) / 80);
-        dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
-
-        _a = 1. / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
-        _b = exp( -(E(x) + 38) / 6.5) / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
-        dmNap(dx) = _a * (1.0 - mNap(x)) - _b * mNap(x);
-
-      // differential equation for the Ihf activation variable
-        _a = 1. / (1 + exp( (E(x) + 79.2) / 9.78));
-        _b = 0.51 / (exp( (E(x) - 1.7) / 10) + exp( -(E(x) + 340) / 52)) + 1;
-        dIh1(dx) = (_a - Ih1(x)) / _b;
-
-      // differential equation for the Ihs activation variable
-        _a = 1. / (1 + exp( (E(x) + 71.3) / 7.9));
-        _b = 5.6 / (exp( (E(x) - 1.7) / 14) + exp( -(E(x) + 260) / 43)) + 1;
-        dIh2(dx) = (_a - Ih2(x)) / _b;
-}
-
-
-
-
-// =========== oscillators
-
-const char* const cnrun::CN_ParamNames_OscillatorColpitts[] = {
-        "a",
-        "g",
-        "q",
-        "η"
-};
-const char* const cnrun::CN_ParamSyms_OscillatorColpitts[] = {
-        "a",
-        "g",
-        "q",
-        "eta"
-};
-const double cnrun::CN_Params_OscillatorColpitts[] = {
-        1.0,    // a
-        0.0797, // g
-        0.6898, // q
-        6.2723  // eta
-};
-
-
-const char* const cnrun::CN_VarNames_OscillatorColpitts[] = {
-        "x0",
-        "x1",
-        "x2"
-};
-const char* const cnrun::CN_VarSyms_OscillatorColpitts[] = {
-        "x0",
-        "x1",
-        "x2"
-};
-const double cnrun::CN_Vars_OscillatorColpitts[] = {
-        0.02,
-        0.69,
-       -0.53
-};
-
-
-void
-cnrun::COscillatorColpitts::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        enum TParametersOscilColpitts {
-                a, g, q,
-                eta
-        };
-
-        dx0(dx) =  P[a]   *  x1(x) + Isyn(x);
-        dx1(dx) = -P[g]   * (x0(x) + x2(x)) - P[q] * x1(x);
-        dx2(dx) =  P[eta] * (x1(x) + 1.0 - exp( -x0(x)));
-//        dx[idx  ] =  p[0] *  x[idx+1] + Isyn;
-//        dx[idx+1] = -p[1] * (x[idx  ] + x[idx+2]) - p[2] * x[idx+1];
-//        dx[idx+2] =  p[3] * (x[idx+1] + 1.0 - exp(-x[idx]));
-}
-
-
-
-
-
-
-/*
-
-const char* const CN_ParamNames_OscillatorLV[] = {
-        "Self inhibition",
-};
-const char* const CN_ParamSyms_OscillatorLV[] = {
-        "rho_ii",
-};
-const double CN_Params_OscillatorLV[] = {
-        1.0,        // 0 - rho_ii: "self inhibition"
-};
-
-
-const char* const CN_VarNames_OscillatorLV[] = {
-        "Membrane potential, " CN_PU_POTENTIAL,
-        "Firing rate"
-};
-const char* const CN_VarSyms_OscillatorLV[] = {
-        "E",
-        "fr"
-};
-const double CN_Vars_OscillatorLV[] = {
-        0.,        // 0 - added a place for E
-        0.1        // 1 - firing rate
-};
-
-
-*/
-
-
-
-
-
-
-
-const char* const cnrun::CN_ParamNames_OscillatorVdPol[] = {
-        "η",
-        "ω²",
-//        "\317\203"
-};
-const char* const cnrun::CN_ParamSyms_OscillatorVdPol[] = {
-        "eta",
-        "omegasq", // omega^2
-//        "sigma"
-};
-const double cnrun::CN_Params_OscillatorVdPol[] = {
-        1.0,        // eta
-        0.1,        // omega^2
-//        0.0        // noise level
-};
-
-const char* const cnrun::CN_VarNames_OscillatorVdPol[] = {
-        "Amplitude",
-        "v"
-};
-const char* const cnrun::CN_VarSyms_OscillatorVdPol[] = {
-        "A",
-        "v"
-};
-const double cnrun::CN_Vars_OscillatorVdPol[] = {
-        0.1,       // amplitude
-        0.0        // internal var
-};
-
-
-//#endif // CN_WANT_MORE_NEURONS
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/hosted-neurons.hh b/upstream/src/libcnrun/hosted-neurons.hh
deleted file mode 100644 (file)
index d77e2b5..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- *       File name:  libcn/hosted-neurons.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2008-10-11
- *
- *         Purpose:  hosted neuron classes (those having their
- *                   state vars on parent model's integration vectors)
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_HOSTEDNEURONS_H_
-#define CNRUN_LIBCN_HOSTEDNEURONS_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <gsl/gsl_math.h>
-
-#include "forward-decls.hh"
-#include "base-neuron.hh"
-#include "hosted-attr.hh"
-
-namespace cnrun {
-
-enum class TIncludeOption { is_last, is_notlast, };
-
-class C_HostedNeuron
-  : public C_BaseNeuron, public C_HostedAttributes {
-
-        DELETE_DEFAULT_METHODS (C_HostedNeuron)
-
-    protected:
-        C_HostedNeuron (TUnitType intype, const string& inlabel,
-                        double x, double y, double z,
-                        CModel*, int s_mask,
-                        TIncludeOption include_option);
-    public:
-        void reset_vars();
-        double &var_value( size_t);
-        const double &get_var_value( size_t) const;
-};
-
-
-
-
-
-class C_HostedConductanceBasedNeuron
-  : public C_HostedNeuron {
-
-        DELETE_DEFAULT_METHODS (C_HostedConductanceBasedNeuron)
-
-    protected:
-        C_HostedConductanceBasedNeuron (TUnitType intype, const string& inlabel,
-                                        double inx, double iny, double inz,
-                                        CModel* inM, int s_mask,
-                                        TIncludeOption include_option)
-              : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
-                {}
-
-    public:
-        double  E() const; // needs access to parent model var vector, defined in model.h
-        double  E( vector<double> &b) const  { return b[idx+0]; }
-        double& dE( vector<double> &b)       { return b[idx+0]; }
-
-        size_t n_spikes_in_last_dt() const;
-
-        void do_detect_spike_or_whatever();
-};
-
-
-
-
-
-// for completeness' sake -- no descendants yet
-class C_HostedRateBasedNeuron
-  : public C_HostedNeuron {
-
-        DELETE_DEFAULT_METHODS (C_HostedRateBasedNeuron)
-
-    protected:
-        C_HostedRateBasedNeuron (TUnitType intype, const string& inlabel,
-                                 double inx, double iny, double inz,
-                                 CModel* inM, int s_mask,
-                                 TIncludeOption include_option)
-              : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
-                {}
-
-    public:
-        size_t n_spikes_in_last_dt() const;
-};
-
-
-
-
-
-
-
-
-
-
-// Hodgkin-Huxley classic
-
-class CNeuronHH_d
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (CNeuronHH_d)
-
-    public:
-        CNeuronHH_d (const string& inlabel,
-                     double x, double y, double z,
-                     CModel *inM, int s_mask = 0,
-                     TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron (NT_HH_D, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-      // parameters (since gcc 4.4, accessible from within member functions defined outside class definition, gee!)
-        enum {
-                gNa, ENa, gK,  EK, gl, El, Cmem,
-                alpha_m_a,        alpha_m_b,        alpha_m_c,        beta_m_a,        beta_m_b,        beta_m_c,
-                alpha_h_a,        alpha_h_b,        alpha_h_c,        beta_h_a,        beta_h_b,        beta_h_c,
-                alpha_n_a,        alpha_n_b,        alpha_n_c,        beta_n_a,        beta_n_b,        beta_n_c,
-                Idc,
-        };
-
-      // current state
-      // these wrappers mainly for code legibility in derivative(); otherwise, not used
-      // for reporting, CModel accesses vars as V[idx+n]
-        double   m( vector<double>& b) const { return b[idx+1]; }
-        double   h( vector<double>& b) const { return b[idx+2]; }
-        double   n( vector<double>& b) const { return b[idx+3]; }
-        double& dm( vector<double>& b)       { return b[idx+1]; }
-        double& dh( vector<double>& b)       { return b[idx+2]; }
-        double& dn( vector<double>& b)       { return b[idx+3]; }
-
-        void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
-};
-
-
-
-
-
-
-
-class CNeuronHH2_d
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (CNeuronHH2_d)
-
-    public:
-        CNeuronHH2_d (const string& inlabel,
-                      double x, double y, double z,
-                      CModel *inM, int s_mask = 0,
-                      TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron( NT_HH2_D, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        double   m( vector<double>& b) const { return b[idx+1]; }
-        double   h( vector<double>& b) const { return b[idx+2]; }
-        double   n( vector<double>& b) const { return b[idx+3]; }
-        double& dm( vector<double>& b)       { return b[idx+1]; }
-        double& dh( vector<double>& b)       { return b[idx+2]; }
-        double& dn( vector<double>& b)       { return b[idx+3]; }
-
-        void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-// Entorhinal cortex stellate cell
-
-class CNeuronEC_d
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (CNeuronEC_d)
-
-    public:
-        CNeuronEC_d( const string& inlabel,
-                     double x, double y, double z,
-                     CModel *inM, int s_mask = 0,
-                     TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron (NT_EC_D, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        double    m   ( vector<double>& b) const { return b[idx+1]; }
-        double    h   ( vector<double>& b) const { return b[idx+2]; }
-        double    n   ( vector<double>& b) const { return b[idx+3]; }
-        double    Ih1 ( vector<double>& b) const { return b[idx+4]; }
-        double    Ih2 ( vector<double>& b) const { return b[idx+5]; }
-        double&   dm  ( vector<double>& b)       { return b[idx+1]; }
-        double&   dh  ( vector<double>& b)       { return b[idx+2]; }
-        double&   dn  ( vector<double>& b)       { return b[idx+3]; }
-        double& dIh1  ( vector<double>& b)       { return b[idx+4]; }
-        double& dIh2  ( vector<double>& b)       { return b[idx+5]; }
-
-
-        void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-
-
-
-class CNeuronECA_d
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (CNeuronECA_d)
-
-    public:
-        CNeuronECA_d( const string& inlabel,
-                      double x, double y, double z,
-                      CModel *inM, int s_mask = 0,
-                      TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron( NT_ECA_D, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        double      m( vector<double>& b) const { return b[idx+1]; }
-        double      h( vector<double>& b) const { return b[idx+2]; }
-        double      n( vector<double>& b) const { return b[idx+3]; }
-        double   mNap( vector<double>& b) const { return b[idx+4]; }
-        double    Ih1( vector<double>& b) const { return b[idx+5]; }
-        double    Ih2( vector<double>& b) const { return b[idx+6]; }
-
-        double&    dm( vector<double>& b)       { return b[idx+1]; }
-        double&    dh( vector<double>& b)       { return b[idx+2]; }
-        double&    dn( vector<double>& b)       { return b[idx+3]; }
-        double& dmNap( vector<double>& b)       { return b[idx+4]; }
-        double&  dIh1( vector<double>& b)       { return b[idx+5]; }
-        double&  dIh2( vector<double>& b)       { return b[idx+6]; }
-
-        void derivative( vector<double>&, vector<double>&);
-};
-
-//#endif  // CN_WANT_MORE_NEURONS
-
-
-
-
-
-
-
-
-//#ifdef CN_WANT_MORE_NEURONS
-
-class COscillatorColpitts
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (COscillatorColpitts)
-
-    public:
-        COscillatorColpitts( const string& inlabel,
-                             double x, double y, double z,
-                             CModel *inM, int s_mask = 0,
-                             TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron (NT_COLPITTS, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        double   x0( vector<double>& b) const { return b[idx+0]; }  // there's no E() for this one
-        double   x1( vector<double>& b) const { return b[idx+1]; }
-        double   x2( vector<double>& b) const { return b[idx+2]; }
-        double& dx0( vector<double>& b)       { return b[idx+0]; }
-        double& dx1( vector<double>& b)       { return b[idx+1]; }
-        double& dx2( vector<double>& b)       { return b[idx+2]; }
-
-        virtual void derivative( vector<double>&, vector<double>&);
-};
-
-
-
-
-
-
-
-/*
-// ne marche pas
-
-class COscillatorLV
-  : public C_HostedConductanceBasedNeuron {
-
-    public:
-        double   fr( vector<double>& b) const        { return b[idx+1]; }
-        double& dfr( vector<double>& b)                { return b[idx+1]; }
-
-        COscillatorLV( const char *inlabel,
-                       double x, double y, double z,
-                       CModel *inM, int s_mask = 0,
-                       CModel::TIncludeOption include_option = true)
-              : C_HostedConductanceBasedNeuron( NT_LV, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        enum TParametersOscilLV {
-                rho
-        };
-        void derivative( vector<double>& x, vector<double>& dx)
-                {
-                        dE(dx) = fr(x) * (1.0 - P[rho] * fr(x)) - Isyn(x);
-                }
-};
-
-
-*/
-
-
-
-
-class COscillatorVdPol
-  : public C_HostedConductanceBasedNeuron {
-
-        DELETE_DEFAULT_METHODS (COscillatorVdPol)
-
-     public:
-        COscillatorVdPol (const string& inlabel,
-                          double x, double y, double z,
-                          CModel *inM, int s_mask = 0,
-                          TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedConductanceBasedNeuron (NT_VDPOL, inlabel, x, y, z,
-                                                inM, s_mask, include_option)
-                {}
-
-        double   amp( vector<double>& b) const  { return b[idx+0]; }
-        double    _x( vector<double>& b) const  { return b[idx+1]; }
-        double& damp( vector<double>& b)        { return b[idx+0]; }
-        double&  d_x( vector<double>& b)        { return b[idx+1]; }
-
-        enum TParametersOscilVdPol {
-                eta, omega2
-        };
-        void derivative( vector<double> &x, vector<double> &dx)
-                {
-                        damp(dx) = _x(x);
-                        d_x(dx) = (P[eta] - gsl_pow_2( amp(x))) * _x(x) - P[omega2] * amp(x) + Isyn(x);
-                }
-};
-
-//#endif  // CN_WANT_MORE_NEURONS
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/hosted-synapses.cc b/upstream/src/libcnrun/hosted-synapses.cc
deleted file mode 100644 (file)
index 8a57542..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- *       File name:  libcn/hosted-synapses.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2009-04-03
- *
- *         Purpose:  hosted synapse classes (those having their
- *                   state vars on parent model's integration vectors)
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <iostream>
-
-#include "hosted-synapses.hh"
-
-#include "types.hh"
-#include "model.hh"
-
-using namespace std;
-
-
-// the base synapse here
-cnrun::C_HostedSynapse::
-C_HostedSynapse (const TUnitType type_,
-                 C_BaseNeuron *source_, C_BaseNeuron *target_,
-                 const double g_, CModel *M_, int s_mask,
-                 TIncludeOption include_option)
-      : C_BaseSynapse (type_, source_, target_, g_, M_, s_mask),
-        C_HostedAttributes()
-{
-        if ( M )
-                M->include_unit( this, include_option);
-        else
-                idx = (unsigned long)-1;
-}
-
-
-
-
-
-// -- parameters
-
-const char* const cnrun::CN_ParamNames_SynapseAB_dd[] = {
-//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
-        "Reversal potential Esyn, " CN_PU_POTENTIAL,
-        "Presyn threshold potential Epre, " CN_PU_POTENTIAL,
-        "Rise rate α, " CN_PU_RATE,
-        "Decay rate β, " CN_PU_RATE,
-        "Time of transmitter release, " CN_PU_TIME,
-//        "Noise level \317\203",
-};
-const char* const cnrun::CN_ParamSyms_SynapseAB_dd[] = {
-//        "gsyn",
-        "Esyn",
-        "Epre",
-        "alpha",
-        "beta",
-        "trel",
-//        "sigma",
-};
-
-const double cnrun::CN_Params_SynapseAB_dd[] = {
-//        0.12,
-        0,
-      -20,
-        0.5,
-        0.05,
-        5.0,
-//        0.
-};
-
-const double cnrun::CN_Params_SynapseABMinus_dd[] = {
-//        0.12,
-        0,
-      -20,
-        0.27785150819749,
-        0.05,
-        5.0,
-//        0.
-};
-
-const double cnrun::CN_Params_SynapseMxAB_dd[] = {
-//        0.12,
-        0,
-      -20,
-        0.27785150819749,  // the only parameter differing from its AB namesake,
-                           // which is also by principle the same as in the ABMinus variation
-        0.05,
-        5.0,
-//        0.
-};
-
-
-const char* const cnrun::CN_ParamNames_SynapseAB_dr[] = {
-//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
-        "Assumed (target->E - Esyn), " CN_PU_POTENTIAL,
-        "Presyn threshold potential Epre, " CN_PU_POTENTIAL,
-        "Rise rate α, " CN_PU_RATE,
-        "Decay rate β, " CN_PU_RATE,
-        "Time of transmitter release, " CN_PU_TIME,
-//        "Noise level \317\203",
-};
-const char* const cnrun::CN_ParamSyms_SynapseAB_dr[] = {
-//        "gsyn",
-        "Ediff",
-        "Epre",
-        "alpha",
-        "beta",
-        "trel",
-//        "sigma",
-};
-
-
-const double cnrun::CN_Params_SynapseMxAB_dr[] = {
-//        0.12,
-      -60 - 0,  // Ediff: a reasonable Esyn - target->E, the latter being -60 mV at rest
-      -20,
-        0.27785150819749,
-        0.05,
-        5.0,
-//        0.
-};
-
-
-
-
-
-
-
-const char* const cnrun::CN_ParamNames_SynapseAB_rr[] = {
-//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
-        "Assumed (target->E - Esyn), " CN_PU_VOLTAGE,
-        "Rise rate α, " CN_PU_RATE,
-        "Decay rate β, " CN_PU_RATE,
-        "Refractory period T, " CN_PU_TIME,
-//        "Noise level \317\203",
-};
-const char* const cnrun::CN_ParamSyms_SynapseAB_rr[] = {
-//        "gsyn",
-        "Ediff",
-        "alpha",
-        "beta",
-        "T",
-//        "sigma",
-};
-const double cnrun::CN_Params_SynapseAB_rr[] = {
-//        0.12,
-      -60 - 0,
-        0.27785150819749,
-        0.05,
-        5,
-//        0.
-};
-
-
-
-const char* const cnrun::CN_ParamNames_SynapseRall_dd[] = {
-//        "Synaptic strength g, " CN_PU_CONDUCTANCE,
-        "Reversal potential, " CN_PU_POTENTIAL,
-        "Presynaptic threshold potential, " CN_PU_POTENTIAL,
-        "τ, " CN_PU_RATE,
-//        "Noise level \317\203",
-};
-const char* const cnrun::CN_ParamSyms_SynapseRall_dd[] = {
-//        "gsyn",
-        "Esyn",
-        "Epre",
-        "tau",
-//        "sigma",
-};
-const double cnrun::CN_Params_SynapseRall_dd[] = {
-//        0.12,
-        0,
-      -20,
-        2,
-//        0.
-};
-
-
-
-
-// -- variables
-
-const char* const cnrun::CN_VarNames_SynapseAB[] = {
-        "Amount of neurotransmitter released S"
-};
-const char* const cnrun::CN_VarSyms_SynapseAB[] = {
-        "S"
-};
-const double cnrun::CN_Vars_SynapseAB[] = {
-        0.
-};
-
-
-const char* const cnrun::CN_VarNames_SynapseRall[] = {
-        "Amount of neurotransmitter released S",
-        "Amount of neurotransmitter absorbed R",
-};
-const char* const cnrun::CN_VarSyms_SynapseRall[] = {
-        "S",
-        "R",
-};
-const double cnrun::CN_Vars_SynapseRall[] = {
-        0.,
-        0.
-};
-
-
-
-
-
-
-
-void
-cnrun::CSynapseAB_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        if ( x[0] - t_last_release_started <= P[_rtime_] ) {
-              // continue release from an old spike
-                dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
-        } else
-                if ( _source->E(x) > P[_Epre_] ) {
-                      // new spike ... start releasing
-                        t_last_release_started = x[0];
-                        dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
-                } else {
-                      // no release
-                        dS(dx) = -P[_beta_] * S(x);
-                }
-}
-
-
-
-
-void
-cnrun::CSynapseABMinus_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        if ( x[0] - t_last_release_started <= P[_rtime_] ) {
-              // continue release from an old spike
-                dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
-        } else
-                if ( _source->E(x) > P[_Epre_] ) {
-                      // new spike ... start releasing
-                        t_last_release_started = x[0];
-                        dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
-                } else {
-                      // no release
-                        dS(dx) = -P[_beta_] * S(x);
-                }
-}
-
-
-
-
-// -------- Multiplexing AB
-
-void
-cnrun::CSynapseMxAB_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-//        printf( "%s %lu %d %g\n", _source->label, _source->serial_id, _source->idx, _source->E(x));
-
-        if ( q() > 0 ) {
-                size_t effective_q = q();
-              // as we nudge along a little within RK's operational
-              // dt, some spikes can expire in that brief while:
-              // decrement q then, just for this while
-                while ( effective_q  &&  M->model_time(x) - _kq[q()-effective_q] > P[_rtime_] )
-                        --effective_q;
-#ifdef CN_MORECODE__
-                if ( effective_q < q() )
-                        M->vp( 6, "YMxAB %s smacks %zu spike(s) of %zu at %g(+%g)\n", label,
-                               (size_t)q() - effective_q, (size_t)q(),
-                               M->model_time(),
-                               M->model_time(x) - M->model_time());
-#endif
-                dS(dx) = P[_alpha_] * effective_q - P[_beta_] * S(x);
-        } else
-              // no release, decay
-                dS(dx) = -P[_beta_] * S(x);
-}
-
-
-
-void
-cnrun::CSynapseMxAB_dd::
-update_queue()
-{
-        size_t k = _source -> n_spikes_in_last_dt();
-        while ( k-- )
-                _kq.push_back( model_time());
-
-      // see if the oldest spike has gone past synapse release time
-      // disregard spike duration, measure time from saved spike_start
-      // (which is == spike_end)
-        while ( true ) {
-                if ( q() > 0 && model_time() - _kq.front() > P[_rtime_] )
-                        _kq.erase( _kq.begin());
-                else
-                        break;
-        }
-}
-
-
-
-
-
-
-
-
-
-
-void
-cnrun::CSynapseAB_rr::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        // if ( source()->F(x) > 0 )
-        //         printf( "%s->F(x) = %g\n", _source->label, source()->F(x));
-        dS(dx) = -P[_beta_] * S(x)
-                + P[_alpha_] * _numerator / (exp( P[_beta_] / source()->F(x)) + 1);
-}
-
-
-
-
-
-
-
-inline int Heaviside( double val)  { return (val >= 0) ? 1 : 0; }
-
-void
-cnrun::CSynapseRall_dd::
-derivative( vector<double>& x, vector<double>& dx)
-{
-        dR(dx) = 1 / P[_tau_] * (-R(x) + Heaviside( _source->E(x) - P[_Epre_]));
-        dS(dx) = 1 / P[_tau_] * (-S(x) + R(x));
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/hosted-synapses.hh b/upstream/src/libcnrun/hosted-synapses.hh
deleted file mode 100644 (file)
index db2cc59..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- *       File name:  libcn/hosted-synapses.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny <tnowotny@ucsd.edu>
- * Initial version:  2009-04-01
- *
- *         Purpose:  hosted synapse classes (those having their
- *                   state vars on parent model's integration vectors)
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_HOSTEDSYNAPSES_H_
-#define CNRUN_LIBCN_HOSTEDSYNAPSES_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <vector>
-#include <queue>
-#include <cfloat>
-
-#include "base-synapse.hh"
-#include "hosted-attr.hh"
-#include "mx-attr.hh"
-#include "hosted-neurons.hh"
-#include "standalone-neurons.hh"
-
-
-using namespace std;
-
-namespace cnrun {
-
-class C_HostedSynapse
-  : public C_BaseSynapse, public C_HostedAttributes {
-
-        DELETE_DEFAULT_METHODS (C_HostedSynapse)
-
-    protected:
-        C_HostedSynapse (TUnitType type_,
-                         C_BaseNeuron *source_, C_BaseNeuron *target_,
-                         double g_, CModel*, int s_mask = 0,
-                         TIncludeOption = TIncludeOption::is_last);
-    public:
-        void reset_vars();
-        double &var_value( size_t);
-        const double &get_var_value( size_t) const;
-
-        double   S() const; // needs access to parent model var vector, defined in model.h
-        double   S( vector<double> &b) const        { return b[idx+0]; }
-        double& dS( vector<double> &b) const        { return b[idx+0]; }
-};
-
-
-
-
-// Note on synapses classification per source/target being a tonic
-// (rate) or phasic (discrete) unit:
-//
-// * Where a synapse connects _to_ a Rate neuron, it will have Ediff
-//   in lieu of Esyn and compute Isyn accordingly, otherwise inheriting
-//   its parameters.
-//
-// * Where a synapse connects _from_ a Rate unit, its derivative
-//   method follows a completely different equation.  It now has a
-//   different set of parameters, too.
-
-// The suffix in a class name, _xy, means x, source, y, target, with
-// `d' for discrete, `r' for rate.
-
-
-// The `multiplexing' part has a relevance for the source of the
-// synapse, with magic to collect and multiplex more than a single
-// spike per dt.
-//
-// * The source is a specialized (`dot'), and inherently phasic, unit.
-// * All parameters are inherited from the base class.
-
-
-// Alpha-Beta family
-
-class CSynapseAB_dd
-  : public C_HostedSynapse {
-
-        DELETE_DEFAULT_METHODS (CSynapseAB_dd)
-
-    public:
-        CSynapseAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                       double ing, CModel *inM, int s_mask = 0,
-                       TIncludeOption include_option = TIncludeOption::is_last,
-                       TUnitType alt_type = YT_AB_DD)
-              : C_HostedSynapse (alt_type, insource, intarget,
-                                 ing, inM, s_mask, include_option)
-                {}
-
-        enum {
-                _Esyn_, _Epre_, _alpha_, _beta_, _rtime_
-        };
-
-        double Isyn( const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
-                {
-                        return -g * S() * (with_neuron.E() - P[_Esyn_]);
-//                        return -P[_gsyn_] * S() * (_target->E() - P[_Esyn_]);
-                }
-        double Isyn( vector<double>& b, const C_BaseNeuron &with_neuron, double g) const  __attribute__ ((hot))
-                {
-                        return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
-//                        return -P[_gsyn_] * S(b) * (_target->E(b) - P[_Esyn_]);
-                }
-
-        void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
-};
-
-
-class CNeuronHHRate;
-
-// TODO
-class CSynapseAB_dr;
-class CSynapseAB_rd;
-
-
-class CSynapseAB_rr
-  : public C_HostedSynapse {
-
-        DELETE_DEFAULT_METHODS (CSynapseAB_rr)
-
-    public:
-        CSynapseAB_rr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                       double ing, CModel *inM, int s_mask = 0,
-                       TIncludeOption include_option = TIncludeOption::is_last,
-                       TUnitType alt_type = YT_AB_RR)
-              : C_HostedSynapse( alt_type, insource, intarget,
-                                 ing, inM, s_mask, include_option)
-                {}
-
-        enum {
-                _Ediff_, _alpha_, _beta_, _T_, _sigma_
-        };
-
-      // supply own Isyn to avoid referencing target->E
-        double Isyn( const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S() * P[_Ediff_];
-                }
-        double Isyn( vector<double>& x, const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S(x) * P[_Ediff_];
-                }
-
-        void derivative( vector<double>&, vector<double>&);
-
-        void param_changed_hook()
-                {
-                        _numerator = exp( P[_beta_] * P[_T_]) + 1;
-                }
-    private:
-        double  _numerator;
-};
-
-
-
-
-
-
-
-
-class CSynapseMxAB_dd
-  : public CSynapseAB_dd, public C_MultiplexingAttributes {
-
-        DELETE_DEFAULT_METHODS (CSynapseMxAB_dd)
-
-    public:
-        CSynapseMxAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                         double ing, CModel *inM, int s_mask = 0,
-                         TIncludeOption include_option = TIncludeOption::is_last,
-                         TUnitType alt_type = YT_MXAB_DD)
-              : CSynapseAB_dd (insource, intarget,
-                               ing, inM, s_mask, include_option,
-                               alt_type)
-                {}
-
-        void reset_state()
-                {
-                        C_HostedSynapse::reset_state();
-                        C_MultiplexingAttributes::reset();
-                }
-
-      // because Mx*'s synapse source is always a standalone, non-integratable neuron,
-      // which don't propagate vars onto M->V, we fold S(x) to make the actual S value available
-      // from within the integrator
-        double S() const                        { return C_HostedSynapse::S(); }
-        double S( vector<double> &unused) const { return C_HostedSynapse::S(); }
-
-        void derivative( vector<double>&, vector<double>&)  __attribute__ ((hot));
-
-    private:
-        friend class CModel;
-        void update_queue();
-};
-
-
-
-
-
-class CSynapseMxAB_dr
-  : public CSynapseMxAB_dd {
-
-        DELETE_DEFAULT_METHODS (CSynapseMxAB_dr)
-
-    public:
-        CSynapseMxAB_dr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                         double ing, CModel *inM, int s_mask = 0,
-                         TIncludeOption include_option = TIncludeOption::is_last)
-              : CSynapseMxAB_dd (insource, intarget,
-                                 ing, inM, s_mask, include_option,
-                                 YT_MXAB_DR)
-                {}
-
-        enum { _Ediff_, /* ... */ };
-        double Isyn( const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S() * P[_Ediff_];
-                }
-        double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S() * P[_Ediff_];
-                }
-};
-
-
-
-
-
-
-
-
-
-class CSynapseABMinus_dd
-  : public CSynapseAB_dd {
-
-        DELETE_DEFAULT_METHODS (CSynapseABMinus_dd)
-
-    public:
-        CSynapseABMinus_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                            double ing, CModel *inM, int s_mask = 0,
-                            TIncludeOption include_option = TIncludeOption::is_last)
-              : CSynapseAB_dd (insource, intarget,
-                               ing, inM, s_mask, include_option,
-                               YT_ABMINUS_DD)
-                {}
-
-        enum {
-                _Esyn_, _Epre_, _alpha_, _beta_, _rtime_, _sigma_
-        };
-
-        void derivative( vector<double>&, vector<double>&);
-};
-
-
-// TODO
-class CSynapseABMinus_dr;
-class CSynapseABMinus_rd;
-class CSynapseABMinus_rr;
-
-
-
-
-// Rall
-
-class CSynapseRall_dd
-  : public C_HostedSynapse {
-
-        DELETE_DEFAULT_METHODS (CSynapseRall_dd)
-
-    public:
-        CSynapseRall_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
-                         double ing, CModel *inM, int s_mask = 0,
-                         TIncludeOption include_option = TIncludeOption::is_last)
-              : C_HostedSynapse (YT_RALL_DD, insource, intarget,
-                                 ing, inM, s_mask, include_option)
-                {}
-
-        double&  R( vector<double>& b)        { return b[idx+1]; }
-        double& dR( vector<double>& b)        { return b[idx+1]; }
-
-        enum {
-                _Esyn_, _Epre_, _tau_, _sigma_
-        };
-
-        double Isyn( const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S() * (with_neuron.E() - P[_Esyn_]);
-                }
-        double Isyn( vector<double>&b, const C_BaseNeuron &with_neuron, double g) const
-                {
-                        return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
-                }
-
-        void derivative( vector<double>&, vector<double>&);
-};
-
-// TODO
-class CSynapseRall_rd;
-class CSynapseRall_dr;
-class CSynapseRall_rr;
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/integrate-base.hh b/upstream/src/libcnrun/integrate-base.hh
deleted file mode 100644 (file)
index e4d6399..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *       File name:  libcn/integrate-base.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny
- * Initial version:  2008-09-23
- *
- *         Purpose:  base class for integrators, to be plugged into CModel.
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_INTEGRATE_BASE_H_
-#define CNRUN_LIBCN_INTEGRATE_BASE_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include "libstilton/lang.hh"
-#include "forward-decls.hh"
-
-
-namespace cnrun {
-
-class CIntegrate_base {
-
-        DELETE_DEFAULT_METHODS (CIntegrate_base)
-
-    public:
-        double  _dt_min, _dt_max, _dt_cap,
-                _eps, _eps_abs, _eps_rel,
-                dt;  // that which is current
-
-        bool    is_owned;
-
-        CModel *model;
-
-        CIntegrate_base (const double& dt_min, const double& dt_max, const double& dt_cap,
-                         const double& eps, const double& eps_abs, const double& eps_rel,
-                         bool inis_owned)
-              : _dt_min (dt_min), _dt_max (dt_max), _dt_cap (dt_cap),
-                _eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
-                dt (dt_min),
-                is_owned (inis_owned)
-                {}
-        virtual ~CIntegrate_base()
-                {}
-
-        virtual void cycle() = 0;
-        virtual void fixate() = 0;
-        virtual void prepare() = 0;
-};
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/integrate-rk65.hh b/upstream/src/libcnrun/integrate-rk65.hh
deleted file mode 100644 (file)
index ae1d033..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- *       File name:  libcn/integrate-rk65.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny
- * Initial version:  2008-09-23
- *
- *         Purpose:  A Runge-Kutta 6-5 integrator.
- *
- *         License:  GPL-2+
- */
-
-#ifndef CNRUN_LIBCN_INTEGRATERK65_H_
-#define CNRUN_LIBCN_INTEGRATERK65_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <vector>
-#include "libstilton/lang.hh"
-#include "forward-decls.hh"
-#include "integrate-base.hh"
-
-using namespace std;
-
-namespace cnrun {
-
-class CIntegrateRK65
-  : public CIntegrate_base {
-
-        DELETE_DEFAULT_METHODS (CIntegrateRK65)
-
-    public:
-        CIntegrateRK65 (double dt_min_ = 1e-6, double dt_max_ = .5, double dt_cap_ = 5,
-                        double eps_ = 1e-8,  double eps_abs_ = 1e-12, double eps_rel_ = 1e-6,
-                        bool is_owned_ = true)
-              : CIntegrate_base (dt_min_, dt_max_, dt_cap_,
-                                 eps_, eps_abs_, eps_rel_, is_owned_)
-                {}
-
-        void cycle() __attribute__ ((hot));
-        void fixate() __attribute__ ((hot));
-        void prepare();
-
-    private:
-        vector<double> Y[9], F[9], y5;
-};
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/integrator/base.hh b/upstream/src/libcnrun/integrator/base.hh
new file mode 100644 (file)
index 0000000..1fe3696
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ *       File name:  libcnrun/integrator/base.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer@gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2008-09-23
+ *
+ *         Purpose:  base class for integrators, to be plugged into CModel.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCNRUN_INTEGRATOR_BASE_H_
+#define CNRUN_LIBCNRUN_INTEGRATOR_BASE_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include "libstilton/lang.hh"
+#include "model/forward-decls.hh"
+
+
+namespace cnrun {
+
+class CIntegrate_base {
+
+        DELETE_DEFAULT_METHODS (CIntegrate_base)
+
+    public:
+        double  _dt_min, _dt_max, _dt_cap,
+                _eps, _eps_abs, _eps_rel,
+                dt;  // that which is current
+
+        bool    is_owned;
+
+        CModel *model;
+
+        CIntegrate_base (const double& dt_min, const double& dt_max, const double& dt_cap,
+                         const double& eps, const double& eps_abs, const double& eps_rel,
+                         bool inis_owned)
+              : _dt_min (dt_min), _dt_max (dt_max), _dt_cap (dt_cap),
+                _eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
+                dt (dt_min),
+                is_owned (inis_owned)
+                {}
+        virtual ~CIntegrate_base()
+                {}
+
+        virtual void cycle() = 0;
+        virtual void fixate() = 0;
+        virtual void prepare() = 0;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/integrator/rk65.hh b/upstream/src/libcnrun/integrator/rk65.hh
new file mode 100644 (file)
index 0000000..c1d2310
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ *       File name:  libcnrun/integrator/rk65.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer@gmail.com>
+ *                   building on original work by Thomas Nowotny
+ * Initial version:  2008-09-23
+ *
+ *         Purpose:  A Runge-Kutta 6-5 integrator.
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCNRUN_INTEGRATOR_RK65_H_
+#define CNRUN_LIBCNRUN_INTEGRATOR_RK65_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <vector>
+#include "libstilton/lang.hh"
+#include "base.hh"
+
+using namespace std;
+
+namespace cnrun {
+
+class CIntegrateRK65
+  : public CIntegrate_base {
+
+        DELETE_DEFAULT_METHODS (CIntegrateRK65)
+
+    public:
+        CIntegrateRK65 (double dt_min_ = 1e-6, double dt_max_ = .5, double dt_cap_ = 5,
+                        double eps_ = 1e-8,  double eps_abs_ = 1e-12, double eps_rel_ = 1e-6,
+                        bool is_owned_ = true)
+              : CIntegrate_base (dt_min_, dt_max_, dt_cap_,
+                                 eps_, eps_abs_, eps_rel_, is_owned_)
+                {}
+
+        void cycle() __attribute__ ((hot));
+        void fixate() __attribute__ ((hot));
+        void prepare();
+
+    private:
+        vector<double> Y[9], F[9], y5;
+};
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model-cycle.cc b/upstream/src/libcnrun/model-cycle.cc
deleted file mode 100644 (file)
index 5610f48..0000000
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- *       File name:  cnrun/model-cycle.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- * Initial version:  2008-08-02
- *
- *         Purpose:  CModel top cycle
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <ctime>
-#include <cstdlib>
-#include <iostream>
-
-#include "libstilton/lang.hh"
-#include "integrate-rk65.hh"
-#include "model.hh"
-
-
-using namespace std;
-
-
-/*--------------------------------------------------------------------------
-  Implementation of a 6-5 Runge-Kutta method with adaptive time step
-  mostly taken from the book "The numerical analysis of ordinary differential
-  equations - Runge-Kutta and general linear methods" by J.C. Butcher, Wiley,
-  Chichester, 1987 and a free adaptation to a 6 order Runge Kutta method
-  of an ODE system with additive white noise
---------------------------------------------------------------------------*/
-
-inline namespace {
-
-double __Butchers_a[9][8] = {
-        { },
-        { 1./9 },
-        { .5/9,        .5/9 },
-        { 0.416666666666667e-1,        0., 0.125 },
-        { 1./6, 0., -0.5, 2./3 },
-        { 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
-        { -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
-        { 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
-        { 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
-};
-
-
-double __Butchers_b[9] = {
-        0.567119155354449e-1,
-        0.,
-        0.,
-        0.210909572355356,
-        0.141490384615385,
-        0.202051282051282,
-        0.253186813186813,
-        0.843679809736684e-1,
-        0.512820512820513e-1
-};
-} // inline namespace
-
-
-
-void
-cnrun::CIntegrateRK65::
-prepare()
-{
-        for ( unsigned short i = 0; i < 9; ++i ) {
-                Y[i].resize( model->_var_cnt);
-                F[i].resize( model->_var_cnt);
-        }
-        y5.resize( model->_var_cnt);
-
-        if ( model->n_standalone_units() > 0 )
-                if ( _dt_max > model->_discrete_dt ) {
-                        _dt_max = model->_discrete_dt;
-                        model->vp( 1, "CIntegrateRK65: Set dt_max to model->discrete_dt: %g\n", _dt_max);
-                }
-}
-
-
-void
-__attribute__ ((hot))
-cnrun::CIntegrateRK65::
-cycle()
-{
-      // omp stuff found inapplicable due to considerable overhead in sys time
-      // (thread creation)
-        unsigned int i, j, k;
-
-        double  aF;
-
-      // calculate iterative terms rk65_Y[__i] and rk65_F[__i] (to sixth order)
-        for ( i = 0; i < 9; ++i ) {
-//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) firstprivate(aF,j,i)
-                for ( k = 0; k < model->_var_cnt; ++k ) {
-                        aF = 0.0;
-                        for ( j = 0; j < i; ++j )
-                                aF += __Butchers_a[i][j] * F[j][k];
-                        Y[i][k] = model->V[k] + dt * aF;
-                }
-              // see to this vector's dt
-                F[i][0] = 1.;
-
-//#pragma omp consider...
-                for ( auto& N : model->hosted_neurons )
-                        N -> derivative( Y[i], F[i]);
-                for ( auto& S : model->hosted_synapses )
-                        S -> derivative( Y[i], F[i]);
-        }
-
-      // sum up Y[i] and F[i] to build 5th order scheme -> y5
-//#pragma omp parallel for private(aF,j)
-        for ( k = 0; k < model->_var_cnt; ++k ) {
-                aF = 0.0;
-                for ( j = 0; j < 8; ++j )
-                        aF += __Butchers_a[8][j] * F[j][k];
-                y5[k] = model->V[k] + dt * aF;
-        }
-
-      // sum up Y[i] and F[i] to build 6th order scheme -> W
-//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) private(aF,j)
-        for ( k = 0; k < model->_var_cnt; ++k ) {
-                aF = 0.0;
-                for ( j = 0; j < 9; ++j )
-                        aF += __Butchers_b[j] * F[j][k];
-                model->W[k] = model->V[k] + dt * aF;
-        }
-
-      // kinkiness in synapses causes dt to rocket
-        double  dtx = min( _dt_max, dt * _dt_cap);
-
-      // determine minimal necessary new dt to get error < eps based on the
-      // difference between results in y5 and W
-        double try_eps, delta, try_dt;
-      // exclude time (at index 0)
-//#pragma omp parallel for private(try_eps,delta,try_dtj)
-        for ( k = 1; k < model->_var_cnt; ++k ) {
-                try_eps = max( _eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
-                delta = abs( model->W[k] - y5[k]);
-                if ( delta > DBL_EPSILON * y5[k] ) {
-                        try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
-                        if ( try_dt < dtx )
-                                dtx = try_dt;
-                }
-        }
-      // make sure we don't grind to a halt
-        if ( dtx < _dt_min )
-                dtx = _dt_min;
-
-      // set the new step
-        dt = dtx;
-}
-
-
-
-
-
-
-
-
-// -------------- CModel::advance and dependents
-
-unsigned int
-cnrun::CModel::
-advance( const double dist, double * const cpu_time_used_p)
-{
-        if ( units.size() == 0 ) {
-                vp( 1, "Model is empty\n");
-                return 0;
-        }
-        if ( is_ready )
-                prepare_advance();
-
-        bool    have_hosted_units = !!n_hosted_units(),
-                have_standalone_units = !!n_standalone_units(),
-                have_ddtbound_units = !!n_ddtbound_units();
-
-        if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
-                return _do_advance_on_pure_hosted( dist, cpu_time_used_p);
-        if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
-                return _do_advance_on_pure_standalone( dist, cpu_time_used_p);
-        if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
-                return _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
-
-        unsigned int retval = _do_advance_on_mixed( dist, cpu_time_used_p);
-        return retval;
-}
-
-void
-__attribute__ ((hot))
-cnrun::CModel::
-_setup_schedulers()
-{
-        regular_periods.clear();
-        regular_periods_last_checked.clear();
-        if ( units_with_periodic_sources.size() ) { // determine period(s) at which to wake up reader update loop
-                for ( auto& U : units_with_periodic_sources )
-                        for ( auto& S : U -> _sources )
-                                regular_periods.push_back(
-                                        (reinterpret_cast<CSourcePeriodic*>(S.source)) -> period());
-                regular_periods.unique();
-                regular_periods.sort();
-                regular_periods_last_checked.resize( regular_periods.size());
-        }
-
-        if ( regular_periods.size() > 0 )
-                vp( 2, "%zd timepoint(s) in scheduler_update_periods: %s\n\n",
-                    regular_periods.size(),
-                    stilton::str::join( regular_periods).c_str());
-
-      // ensure all schedulers are effective at the beginning, too
-        for ( auto& U : units_with_periodic_sources )
-                U->apprise_from_sources();
-}
-
-
-void
-cnrun::CModel::
-prepare_advance()
-{
-        if ( options.log_dt && !_dt_logger )
-                _dt_logger = new ofstream( string(name + ".dt").c_str());
-        if ( options.log_spikers && !_spike_logger )
-                _spike_logger = new ofstream( string(name + ".spikes").c_str());
-
-        _setup_schedulers();
-
-        if ( !n_hosted_units() )
-                _integrator->dt = _discrete_dt;
-
-        is_ready = true;
-
-        vp( 5, stderr, "Model prepared\n");
-}
-
-
-
-// comment concerning for_all_conscious_neurons loop:
-// these have no next_time_E or suchlike, have `fixate' implicit herein; also,
-// conscious neurons fire irrespective of whatever happens elsewhere in the model, and
-// they, logically, have no inputs
-
-#define _DO_ADVANCE_COMMON_INLOOP_BEGIN \
-        make_units_with_continuous_sources_apprise_from_sources();      \
-        {                                                               \
-                auto I = regular_periods.begin();                       \
-                auto Ic = regular_periods_last_checked.begin();         \
-                for ( ; I != regular_periods.end(); ++I, ++Ic )         \
-                        if ( unlikely (model_time() >= *I * (*Ic + 1)) ) { \
-                                (*Ic)++;                                \
-                                make_units_with_periodic_sources_apprise_from_sources(); \
-                        }                                               \
-        }                                                               \
-        make_conscious_neurons_possibly_fire();                         \
-                                                                        \
-        for ( auto& Y : multiplexing_synapses ) \
-                if ( Y->_source )                                        \
-                        Y->update_queue();
-
-
-#define _DO_ADVANCE_COMMON_INLOOP_MID \
-        if ( have_listeners ) {                                         \
-                if ( have_discrete_listen_dt ) {                        \
-                        if ( model_time() - last_made_listen >= options.listen_dt ) { \
-                                make_listening_units_tell();            \
-                                last_made_listen += options.listen_dt;  \
-                        }                                               \
-                } else                                                  \
-                        make_listening_units_tell();                    \
-        }                                                               \
-        if ( unlikely (options.log_dt) )                                \
-                (*_dt_logger) << model_time() << "\t" << dt() << endl;  \
-                                                                        \
-        for ( auto& N : spikelogging_neurons ) {                        \
-                N -> do_detect_spike_or_whatever();                     \
-                if ( !is_diskless &&                                    \
-                     N->n_spikes_in_last_dt() &&                        \
-                     options.log_spikers ) {                            \
-                        (*_spike_logger) << model_time() << "\t";       \
-                        if ( options.log_spikers_use_serial_id )        \
-                                (*_spike_logger) << N->_serial_id << endl; \
-                        else                                            \
-                                (*_spike_logger) << N->_label << endl;  \
-                }                                                       \
-        }
-
-
-#define _DO_ADVANCE_COMMON_INLOOP_END \
-        ++_cycle;                                                       \
-        ++steps;                                                        \
-        if ( options.verbosely != 0 ) {                                 \
-                if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
-                        cpu_time_lastchecked = clock();                 \
-                        if ( options.display_progress_percent && !options.display_progress_time ) \
-                                fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
-                                         (options.verbosely < 0) ? -(options.verbosely+1)*8 : 0, \
-                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
-                        else if ( options.display_progress_time && !options.display_progress_percent ) \
-                                fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
-                                         (options.verbosely < 0) ? -(options.verbosely+1)*16 : 0, \
-                                         model_time());                 \
-                        else if ( options.display_progress_percent && options.display_progress_time ) \
-                                fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
-                                         (options.verbosely < 0) ? -(options.verbosely+1)*24 : 0, \
-                                         model_time(),                  \
-                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
-                        fflush( stderr);                                \
-                }                                                       \
-        }
-
-
-#define _DO_ADVANCE_COMMON_EPILOG \
-        make_spikeloggers_sync_history();                               \
-        cpu_time_ended = clock();                                        \
-        double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
-        if ( cpu_time_used_p )                                                \
-                *cpu_time_used_p = cpu_time_taken_seconds;                \
-        if ( options.verbosely > 0 || options.verbosely <= -1 ) {                        \
-                fprintf( stderr, "\r\033[K");                                \
-                fflush( stderr);                                        \
-        }                                                                \
-        vp( 0, "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:"   \
-            " avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n",     \
-            model_time(), dist, steps, cpu_time_taken_seconds,          \
-            model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_hosted( const double dist, double * const cpu_time_used_p)
-{
-        bool    have_listeners = (listening_units.size() > 0),
-                have_discrete_listen_dt = (options.listen_dt > 0.);
-
-        clock_t cpu_time_started = clock(),
-                cpu_time_ended,
-                cpu_time_lastchecked = cpu_time_started;
-
-        double  time_started = model_time(),
-                time_ending = time_started + dist,
-                last_made_listen = time_started;
-
-        unsigned long steps = 0;
-        do {
-                _DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-                _integrator->cycle();
-
-                _DO_ADVANCE_COMMON_INLOOP_MID
-
-              // fixate
-                _integrator->fixate();
-
-                _DO_ADVANCE_COMMON_INLOOP_END
-
-              // model_time is advanced implicitly in _integrator->cycle()
-        } while ( model_time() < time_ending );
-
-        _DO_ADVANCE_COMMON_EPILOG
-
-        return steps;
-}
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_standalone( const double dist, double * const cpu_time_used_p)
-{
-        bool    have_listeners = !!listening_units.size(),
-                have_discrete_listen_dt = (options.listen_dt > 0.);
-
-        clock_t cpu_time_started = clock(),
-                cpu_time_ended,
-                cpu_time_lastchecked = cpu_time_started;
-
-        double  time_started = model_time(),
-                time_ending = time_started + dist,
-                last_made_listen = time_started;
-
-        unsigned long steps = 0;
-        do {
-                _DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-              // service simple units w/out any vars on the integration vector V
-                for ( auto& N : standalone_neurons )
-                        if ( !N->is_conscious() )
-                                N -> preadvance();
-                for ( auto& Y : standalone_synapses )
-                        Y -> preadvance();
-
-              // even in the case of n_hosted_{neurons,units} == 0, we would need _integrator->cycle() to advance V[0],
-              // which is our model_time(); which is kind of expensive, so here's a shortcut
-                V[0] += _discrete_dt;
-                // _discrete_time += _discrete_dt;  // not necessary
-
-                _DO_ADVANCE_COMMON_INLOOP_MID
-
-              // fixate
-                for ( auto& N : standalone_neurons )
-                        if ( !N->is_conscious() )
-                                N -> fixate();
-                for ( auto& Y : standalone_synapses )
-                        Y -> fixate();
-
-                _DO_ADVANCE_COMMON_INLOOP_END
-
-        } while ( model_time() < time_ending );
-
-        _DO_ADVANCE_COMMON_EPILOG
-
-        return steps;
-}
-
-
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_pure_ddtbound( const double dist, double * const cpu_time_used_p)
-{
-        bool    have_listeners = (listening_units.size() > 0),
-                have_discrete_listen_dt = (options.listen_dt > 0.);
-
-        clock_t cpu_time_started = clock(),
-                cpu_time_ended,
-                cpu_time_lastchecked = cpu_time_started;
-
-        double  time_started = model_time(),
-                time_ending = time_started + dist,
-                last_made_listen = time_started;
-
-        unsigned long steps = 0;
-        do {
-                _DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-              // lastly, service units only serviceable at discrete dt
-                for ( auto& N : ddtbound_neurons )
-                        if ( !N->is_conscious() )
-                                N -> preadvance();
-                for ( auto& Y : ddtbound_synapses )
-                        Y -> preadvance();
-
-                V[0] += _discrete_dt;
-                _discrete_time += _discrete_dt;
-
-                _DO_ADVANCE_COMMON_INLOOP_MID
-
-              // fixate
-                for ( auto& N : ddtbound_neurons )
-                        if ( !N->is_conscious() )
-                                N -> fixate();
-                for ( auto& Y : ddtbound_synapses )
-                        Y -> fixate();
-
-                _DO_ADVANCE_COMMON_INLOOP_END
-
-        } while ( model_time() < time_ending );
-
-        _DO_ADVANCE_COMMON_EPILOG
-
-        return steps;
-}
-
-
-
-
-
-unsigned int
-__attribute__ ((hot))
-cnrun::CModel::
-_do_advance_on_mixed( const double dist, double * const cpu_time_used_p)
-{
-        bool    have_hosted_units = !!n_hosted_units(),
-                have_listeners = !!listening_units.size(),
-                have_discrete_listen_dt = (options.listen_dt > 0.),
-                need_fixate_ddtbound_units;
-
-        clock_t cpu_time_started = clock(),
-                cpu_time_ended,
-                cpu_time_lastchecked = cpu_time_started;
-
-        double  time_started = model_time(),
-                time_ending = time_started + dist,
-                last_made_listen = time_started;
-
-        unsigned long steps = 0;
-        do {
-                _DO_ADVANCE_COMMON_INLOOP_BEGIN
-
-                _integrator->cycle();
-
-              // service simple units w/out any vars on the integration vector V
-                for ( auto& N : standalone_neurons )
-                        if ( !N->is_conscious() )
-                                N -> preadvance();
-                for ( auto& Y : standalone_synapses )
-                        Y -> preadvance();
-
-              // lastly, service units only serviceable at discrete dt
-                if ( this->have_ddtb_units && model_time() >= _discrete_time ) {
-                        for ( auto& N : ddtbound_neurons )
-                                if ( !N->is_conscious() )
-                                        N -> preadvance();
-                        for ( auto& Y : ddtbound_synapses )
-                                Y -> preadvance();
-
-                        _discrete_time += _discrete_dt;
-                        need_fixate_ddtbound_units = true;
-                } else
-                        need_fixate_ddtbound_units = false;
-
-                if ( !have_hosted_units )
-                        V[0] += _discrete_dt;
-
-                _DO_ADVANCE_COMMON_INLOOP_MID
-
-              // fixate
-                _integrator->fixate();
-
-                for ( auto& N : standalone_neurons )
-                        if ( !N->is_conscious() )
-                                N -> fixate();
-                for ( auto& Y : standalone_synapses )
-                        Y -> fixate();
-
-                if ( need_fixate_ddtbound_units ) {
-                        for ( auto& N : ddtbound_neurons )
-                                if ( !N->is_conscious() )
-                                        N -> fixate();
-                        for ( auto& Y : ddtbound_synapses )
-                                Y -> fixate();
-                }
-
-                _DO_ADVANCE_COMMON_INLOOP_END
-
-        } while ( model_time() < time_ending );
-
-        _DO_ADVANCE_COMMON_EPILOG
-
-        return steps;
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/model-nmlio.cc b/upstream/src/libcnrun/model-nmlio.cc
deleted file mode 100644 (file)
index 2286c7b..0000000
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- *       File name:  libcn/model-nmlio.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- * Initial version:  2008-09-02
- *
- *         Purpose:  NeuroML import/export.
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <string>
-#include <iostream>
-#include <regex.h>
-
-#include "forward-decls.hh"
-#include "model.hh"
-
-
-using namespace std;
-
-#ifdef LIBXML_READER_ENABLED
-
-
-int
-cnrun::CModel::
-import_NetworkML( const string& fname, TNMLImportOption import_option)
-{
-        // LIBXML_TEST_VERSION;
-
-        xmlDoc *doc = xmlReadFile( fname.c_str(), nullptr, 0);
-        if ( !doc )
-                return TNMLIOResult::nofile;
-
-        int retval = import_NetworkML( doc, fname, import_option);
-
-        xmlFreeDoc( doc);
-
-        return retval;
-}
-
-
-
-
-
-inline namespace {
-
-xmlNode*
-find_named_root_child_elem( xmlNode *node,     // node to start search from
-                            const char *elem)  // name of the element searched for
-{
-        xmlNode *n;
-        for ( n = node->children; n; n = n->next ) {
-                if ( n->type == XML_ELEMENT_NODE ) {
-                        if ( xmlStrEqual( n->name, BAD_CAST elem) )
-                                return n;
-// the <populations> and <projections> nodes are expected to appear as
-// direct children of the root node; so don't go search deeper than that
-
-//                        if ( n->children ) { // go search deeper
-//                                ni = find_named_elem( n->children, elem);
-//                                if ( ni )
-//                                        return ni;
-//                        }
-                }
-        }
-        return nullptr;
-}
-
-}
-
-int
-cnrun::CModel::
-import_NetworkML( xmlDoc *doc, const string& fname,
-                  TNMLImportOption import_option)
-{
-        int retval = 0;
-
-        // we pass up on validation (for which we would need to keep a
-        // .dtd or Schema at hand), and proceed to extracting elements
-
-        xmlNode *root_node = xmlDocGetRootElement( doc),
-                *n;
-
-      // read meta:notes and make out a name for the model
-        if ( !root_node ) {
-                vp( 0, stderr, "import_NetworkML(\"%s\"): No root element\n", fname.c_str());
-                retval = TNMLIOResult::noelem;
-                goto out;
-        }
-
-      // give it a name: assume it's generated by neuroConstruct for now
-        if ( import_option == TNMLImportOption::reset ) {
-                reset();
-                if ( !(n = find_named_root_child_elem( root_node, "notes")) ) {
-                        vp( 1, stderr, "<notes> element not found; model will be unnamed\n");
-                        // this is not critical, so just keep the user
-                        // informed and proceed
-                } else
-                        if ( n->type == XML_ELEMENT_NODE ) {  // only concern ourselves with nodes of this type
-                                xmlChar *notes_s = xmlNodeGetContent( n);
-                                // look for a substring specific to neuroConstruct, which is obviously speculative
-                                regex_t RE;
-                                regcomp( &RE, ".*project: (\\w*).*", REG_EXTENDED);
-                                regmatch_t M[1+1];
-                                name = (0 == regexec( &RE, (char*)notes_s, 1+1, M, 0))
-                    ? string ((char*)notes_s + M[1].rm_so, M[1].rm_eo - M[1].rm_so)
-                    : "(unnamed)";
-                                xmlFree( notes_s);
-                        } else
-                                name = "(unnamed)";
-        }
-
-        vp( 1, "Model \"%s\": %s topology from %s\n",
-            name.c_str(),
-            (import_option == TNMLImportOption::merge) ?"Merging" :"Importing",
-            fname.c_str());
-
-        // In the following calls to _process_{populations,instances}
-        // functions, the actual order of appearance of these nodes in
-        // the xml file doesn't matter, thanks to the xml contents
-        // being already wholly read and available to us as a tree.
-
-      // process <populations>
-        if ( !(n = find_named_root_child_elem( root_node, "populations")) ) {
-                retval = TNMLIOResult::noelem;
-                goto out;
-        } // assume there is only one <populations> element: don't loop to catch more
-        if ( (retval = _process_populations( n->children)) < 0)        // note n->children, which is in fact a pointer to the first child
-                goto out;
-
-      // process <projections>
-      // don't strictly require any projections as long as there are some neurons
-        if ( (n = find_named_root_child_elem( root_node, "projections")) ) {
-                if ( (retval = _process_projections( n->children)) < 0 )
-                        goto out;
-        } else
-                vp( 2, "No projections found\n");
-
-out:
-        // we are done with topology; now put units' variables on a vector
-        finalize_additions();
-        // can call time_step only after finalize_additions
-
-        return retval;
-}
-
-
-
-
-
-int
-cnrun::CModel::
-_process_populations( xmlNode *n)
-{
-        xmlChar *group_id_s = nullptr,
-                *cell_type_s = nullptr;
-
-        int     pop_cnt = 0;
-
-        try {
-                for ( ; n; n = n->next ) {  // if is nullptr (parent had no children), we won't do a single loop
-                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "population") )
-                                continue;
-
-                        group_id_s = xmlGetProp( n, BAD_CAST "name");
-                        // BAD_CAST is just a cast to xmlChar*
-                        // with a catch that libxml functions
-                        // expect strings pointed to to be good UTF
-                        if ( !group_id_s ) {
-                                vp( 0, stderr, "<population> element missing a \"name\" attribute near line %d\n", n->line);
-                                return TNMLIOResult::badattr;
-                        }
-                      // probably having an unnamed popuation isn't an error so serious as to abort the
-                      // operation, but discipline is above all
-
-                        cell_type_s = xmlGetProp( n, BAD_CAST "cell_type");
-                        // now we know the type of cells included in this population; remember it to pass on to
-                        // _process_population_instances, where it is used to select an appropriate unit type
-                        // when actually adding a neuron to the model
-
-                      // but well, let's check if we have units of that species in stock
-                        if ( !unit_species_is_neuron((char*)cell_type_s) && !unit_family_is_neuron((char*)cell_type_s) ) {
-                                vp( 0, stderr, "Bad cell species or family (\"%s\") in population \"%s\"\n",
-                                    (char*)cell_type_s, group_id_s);
-                                throw TNMLIOResult::badcelltype;
-                        }
-
-                        xmlNode *nin = n->children;  // again, ->children means ->first
-                        if ( nin )
-                                for ( ; nin; nin = nin->next )  // deal with multiple <instances> nodes
-                                        if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "instances") ) {
-                                                int subretval = _process_population_instances(
-                                                        nin->children,
-                                                        group_id_s, cell_type_s);
-                                                if ( subretval < 0 )
-                                                        throw subretval;
-
-                                                vp( 2, " %5d instance(s) of type \"%s\" in population \"%s\"\n",
-                                                    subretval, cell_type_s,  group_id_s);
-                                                ++pop_cnt;
-                                        }
-
-                        xmlFree( cell_type_s), xmlFree( group_id_s);
-                }
-
-                vp( 1, "\tTotal %d population(s)\n", pop_cnt);
-
-        } catch (int ex) {
-                xmlFree( cell_type_s), xmlFree( group_id_s);
-
-                return ex;
-        }
-
-        return pop_cnt;
-}
-
-
-
-
-
-
-int
-cnrun::CModel::
-_process_projections( xmlNode *n)
-{
-        // much the same code as in _process_populations
-
-        xmlChar *prj_name_s = nullptr,
-                *prj_src_s = nullptr,
-                *prj_tgt_s = nullptr,
-                *synapse_type_s = nullptr;
-
-        size_t pop_cnt = 0;
-
-        try {
-                for ( ; n; n = n->next ) {
-                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "projection") )
-                                continue;
-
-                        prj_name_s = xmlGetProp( n, BAD_CAST "name");
-                        if ( !prj_name_s ) {
-                                fprintf( stderr, "<projection> element missing a \"name\" attribute near line %u\n", n->line);
-                                return TNMLIOResult::badattr;
-                        }
-
-                        prj_src_s  = xmlGetProp( n, BAD_CAST "source");
-                        prj_tgt_s  = xmlGetProp( n, BAD_CAST "target");
-                        if ( !prj_src_s || !prj_tgt_s ) {
-                                fprintf( stderr, "Projection \"%s\" missing a \"source\" and/or \"target\" attribute near line %u\n",
-                                         prj_name_s, n->line);
-                                throw TNMLIOResult::badattr;
-                        }
-
-                        xmlNode *nin;
-                        nin = n->children;
-                        if ( !nin )
-                                fprintf( stderr, "Empty <projection> node near line %d\n", n->line);
-
-                        for ( ; nin; nin = nin->next )
-                                if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "synapse_props") ) {
-                                        synapse_type_s = xmlGetProp( nin, BAD_CAST "synapse_type");
-                                        if ( !unit_species_is_synapse( (char*)synapse_type_s) &&
-                                             !unit_family_is_synapse( (char*)synapse_type_s) ) {
-                                                fprintf( stderr, "Bad synapse type \"%s\" near line %u\n",
-                                                         (char*)synapse_type_s, nin->line);
-                                                throw TNMLIOResult::badcelltype;
-                                        }
-                                }
-
-                        for ( nin = n->children; nin; nin = nin->next )
-                                if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "connections") ) {
-                                        int subretval = _process_projection_connections(
-                                                nin->children,
-                                                prj_name_s, synapse_type_s,
-                                                prj_src_s, prj_tgt_s);
-                                        if ( subretval < 0 )
-                                                throw subretval;
-
-                                        vp( 2, " %5d connection(s) of type \"%s\" in projection \"%s\"\n",
-                                            subretval, synapse_type_s,  prj_name_s);
-                                        ++pop_cnt;
-                                }
-                        xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
-                }
-
-                vp( 1, "\tTotal %zd projection(s)\n", pop_cnt);
-
-        } catch (int ex) {
-                xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
-                return ex;
-        }
-
-        return (int)pop_cnt;
-}
-
-
-
-
-
-
-
-int
-cnrun::CModel::
-_process_population_instances(
-        xmlNode *n,
-        const xmlChar *group_prefix,
-        const xmlChar *type_s)
-{
-        int     retval = 0;  // also keeps a count of added neurons
-
-        double  x, y, z;
-        char    cell_id[C_BaseUnit::max_label_size];
-
-        xmlNode *nin;
-
-        xmlChar *id_s = nullptr;
-        try {
-                for ( ; n; n = n->next ) {
-                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "instance") )
-                                continue;
-
-                        xmlChar *id_s = xmlGetProp( n, BAD_CAST "id");
-                        if ( !id_s ) {
-                              // could be less strict here and allow empty ids, which will then be composed
-                              // from group_prefix + id (say, "LN0", "LN1" and so on); but then, as
-                              // individual <projection>s would have to reference both endpoints by explicit
-                              // ids, it is obviously prone to error to have <instance> ids depend solely on
-                              // their order of appearance.
-                              // So we bark at empty ids.
-                                fprintf( stderr, "<instance> element without an \"id\" attribute near line %u\n", n->line);
-                                return TNMLIOResult::badattr;
-                        }
-
-                        size_t total_len = xmlStrlen( group_prefix) + xmlStrlen( id_s);
-                        if ( total_len >= C_BaseUnit::max_label_size ) {
-                                fprintf( stderr, "Combined label for an <instance> (\"%s%s\") exceeding %zu characters near line %d\n",
-                                         group_prefix, id_s, C_BaseUnit::max_label_size, n->line);
-                                throw TNMLIOResult::biglabel;
-                        }
-                        _longest_label = max(
-                                _longest_label,
-                                (unsigned short)snprintf(
-                                        cell_id, C_BaseUnit::max_label_size-1, "%s.%s",
-                                        group_prefix, id_s));  // here, a new instance is given a name
-                        xmlFree( id_s);
-
-                        if ( !(nin = n->children) )
-                                return retval;
-
-                        for ( ; nin; nin = nin->next ) {
-                                if ( !(nin->type == XML_ELEMENT_NODE &&
-                                       xmlStrEqual( nin->name, BAD_CAST "location")) )
-                                        continue;
-
-                                xmlChar *x_s = xmlGetProp( nin, BAD_CAST "x"),
-                                        *y_s = xmlGetProp( nin, BAD_CAST "y"),
-                                        *z_s = xmlGetProp( nin, BAD_CAST "z");
-                              // here we do actually insert neurons into the model
-                                if ( !(x_s && y_s && z_s) )
-                                        vp( 1, stderr, "<location> element missing full set of coordinates near line %d\n", nin->line);
-                                        // not an error
-                                x = strtod( (char*)x_s, nullptr), y = strtod( (char*)y_s, nullptr), z = strtod( (char*)z_s, nullptr);
-                                xmlFree( x_s), xmlFree( y_s), xmlFree( z_s);
-
-                                C_BaseNeuron *neu = add_neuron_species(
-                                        (char*)type_s, cell_id,
-                                        TIncludeOption::is_notlast);
-
-                                if ( !neu || neu->_status & CN_UERROR ) {
-                                        if ( neu )
-                                                delete neu;
-                                        fprintf( stderr, "Failed to add a neuron \"%s\" near line %u\n", cell_id, n->line);
-                                        return TNMLIOResult::structerror;
-                                } else {
-                                        neu->_serial_id = _global_unit_id_reservoir++;
-                                        neu->pos = make_tuple( x, y, z);
-                                        ++retval;
-                                }
-                        }
-                }
-        } catch (int ex) {
-                xmlFree( id_s);
-                return ex;
-        }
-
-        return retval;
-}
-
-
-
-
-int
-cnrun::CModel::
-_process_projection_connections(
-        xmlNode *n,
-        const xmlChar *synapse_name,
-        const xmlChar *type_s,
-        const xmlChar *src_grp_prefix,
-        const xmlChar *tgt_grp_prefix)
-{
-        // similar to _process_population_instances, except that we read some more attributes (source and
-        // target units)
-
-        int     retval = 0;  // is also a counter of synapses
-
-        char    //synapse_id [C_BaseUnit::max_label_size],
-                src_s[C_BaseUnit::max_label_size],
-                tgt_s[C_BaseUnit::max_label_size];
-        double  weight;
-
-        C_BaseSynapse *y;
-
-        xmlChar *src_cell_id_s = nullptr,
-                *tgt_cell_id_s = nullptr,
-                *weight_s      = nullptr;
-        try {
-                for ( ; n; n = n->next ) {
-                        if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "connection") )
-                                continue;
-
-                        src_cell_id_s = xmlGetProp( n, BAD_CAST "pre_cell_id"),
-                        tgt_cell_id_s = xmlGetProp( n, BAD_CAST "post_cell_id"),
-                        weight_s      = xmlGetProp( n, BAD_CAST "weight");
-                        if ( /*!synapse_id_s || */ !src_cell_id_s || !tgt_cell_id_s ) {
-                                fprintf( stderr, "A <connection> element without \"pre_cell_id\" and/or \"post_cell_id\" attribute near line %u\n", n->line);
-                                throw TNMLIOResult::badattr;
-                        }
-
-                        snprintf( src_s, C_BaseUnit::max_label_size-1, "%s.%s", src_grp_prefix, src_cell_id_s);
-                        snprintf( tgt_s, C_BaseUnit::max_label_size-1, "%s.%s", tgt_grp_prefix, tgt_cell_id_s);
-
-                        if ( !weight_s ) {
-                                vp( 3, stderr, "Assuming 0 for a synapse of \"%s.%s\" to \"%s%s\" without a \"weight\" attribute near line %u\n",
-                                    src_grp_prefix, src_cell_id_s, tgt_grp_prefix, tgt_cell_id_s, n->line);
-                                weight = 0.;
-                        }
-                        /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
-                                xmlFree( weight_s);
-
-                        y = add_synapse_species(
-                                (char*)type_s, src_s, tgt_s,
-                                weight,
-                                TSynapseCloningOption::yes,
-                                TIncludeOption::is_notlast);
-
-                        if ( !y || y->_status & CN_UERROR ) {
-                                if ( y )
-                                        delete y;
-                                fprintf( stderr, "Failed to add an \"%s\" synapse from \"%s\" to \"%s\" near line %u\n",
-                                         (char*)type_s, src_s, tgt_s, n->line);
-                                return TNMLIOResult::structerror;
-                        } else
-                                ++retval;
-                }
-
-        } catch (int ex) {
-                /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
-                        xmlFree( weight_s);
-                return ex;
-        }
-
-        return retval;
-}
-
-
-
-int
-cnrun::CModel::
-export_NetworkML( const string& fname)
-{
-        int retval = 0;
-
-        LIBXML_TEST_VERSION;
-
-        fprintf( stderr, "export_NetworkML() not implemented yet\n");
-
-        return retval;
-}
-
-
-#else
-# error Need an XMLREADER-enabled libxml2 (>2.6)
-#endif // LIBXML_READER_ENABLED
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/model-struct.cc b/upstream/src/libcnrun/model-struct.cc
deleted file mode 100644 (file)
index e747583..0000000
+++ /dev/null
@@ -1,1042 +0,0 @@
-/*
- *       File name:  libcn/mmodel-struct.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny
- * Initial version:  2008-09-02
- *
- *         Purpose:  CModel household.
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <sys/time.h>
-#include <csignal>
-#include <iostream>
-#include <set>
-#include <algorithm>
-#include <functional>
-
-#include "libstilton/string.hh"
-#include "model.hh"
-
-
-using namespace std;
-using namespace cnrun::stilton::str;
-
-
-cnrun::CModel::
-CModel (const string& inname,
-        CIntegrate_base *inintegrator,
-        const SModelOptions& inoptions)
-      : name (inname),
-        options (inoptions),
-        _global_unit_id_reservoir (0l),
-        V (1),
-        W (1),
-        _var_cnt (1),                        // reserve [0] for model_time
-        _cycle (0),
-        _discrete_time (0.),  _discrete_dt (NAN),
-        _dt_logger (nullptr),
-        _spike_logger (nullptr),        // open these streams at first write instead in prepare_advance()
-        is_ready (false),
-        is_diskless (false),
-        have_ddtb_units (false),
-        _longest_label (1)
-{
-        V[0] = 0.;
-
-        (_integrator = inintegrator) -> model = this;
-
-        {
-                const gsl_rng_type * T;
-                gsl_rng_env_setup();
-                T = gsl_rng_default;
-                if ( gsl_rng_default_seed == 0 ) {
-                        struct timeval tp = { 0L, 0L };
-                        gettimeofday( &tp, nullptr);
-                        gsl_rng_default_seed = tp.tv_usec;
-                }
-                _rng = gsl_rng_alloc( T);
-        }
-
-      // don't abort interpreter with ^C
-        signal( SIGINT, SIG_IGN);
-}
-
-
-cnrun::CModel::
-~CModel()
-{
-        vp( 4, "Deleting all units...\n");
-
-        while (units.size())
-                if ( units.back() -> is_owned() )
-                        delete units.back();
-                else
-                        units.pop_back();
-
-        if ( _integrator->is_owned )
-                delete _integrator;
-
-        delete _dt_logger;
-        delete _spike_logger;
-
-        while ( _sources.size() ) {
-                delete _sources.back();
-                _sources.pop_back();
-        }
-
-        gsl_rng_free( _rng);
-}
-
-
-void
-cnrun::CModel::
-reset( TResetOption option)
-{
-        _cycle = 0;
-        V[0] = 0.;
-
-        _integrator->dt = _integrator->_dt_min;
-
-        reset_state_all_units();
-        if ( option == TResetOption::with_params )
-                for_each ( units.begin(), units.end(),
-                           [] (C_BaseUnit* u) { u->reset_params(); });
-
-        regular_periods.clear();
-        regular_periods_last_checked.clear();
-        // this will cause scheduler_update_periods_* to be recomputed by prepare_advance()
-
-        is_ready = false;
-
-        if ( options.log_dt ) {
-                delete _dt_logger;
-                _dt_logger = new ofstream( (name + ".dtlog").data());
-        }
-        if ( options.log_spikers ) {
-                delete _spike_logger;
-                _spike_logger = new ofstream( (name + ".spikes").data());
-        }
-}
-
-
-
-cnrun::C_BaseUnit*
-cnrun::CModel::
-unit_by_label( const string& label) const
-{
-        for ( const auto& U : units )
-                if ( label == U->_label )
-                        return U;
-        return nullptr;
-}
-
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-neuron_by_label( const string& label) const
-{
-        for ( const auto& U : units )
-                if ( U->is_neuron() && label == U->label() )
-                        return static_cast<C_BaseNeuron*>(U);
-        return nullptr;
-}
-
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-synapse_by_label( const string& label) const
-{
-        for ( const auto& U : units )
-                if ( U->is_synapse() && label == U->label() )
-                        return static_cast<C_BaseSynapse*>(U);
-        return nullptr;
-}
-
-
-
-
-
-// ----- registering units with core lists
-void
-cnrun::CModel::
-_include_base_unit( C_BaseUnit* u)
-{
-        if ( any_of( units.begin(), units.end(),
-                     bind(equal_to<C_BaseUnit*>(), placeholders::_1, u)) )
-                vp( 1, stderr, "Unit %s found already included in model %s\n",
-                    u->_label, name.c_str());
-        else
-                units.push_back( u);
-
-        vp( 5, "  registered base unit %s\n", u->_label);
-
-        if ( u->has_sources() )
-                register_unit_with_sources( u);
-
-        if ( u->is_listening() ) {
-                if ( count( listening_units.begin(), listening_units.end(), u) )
-                        vp( 1, stderr, "Unit \"%s\" already on listening list\n",
-                            u->_label);
-                else
-                        listening_units.push_back( u);
-        }
-
-        u->M = this;
-        u->_serial_id = _global_unit_id_reservoir++;
-}
-
-
-
-
-int
-cnrun::CModel::
-include_unit( C_HostedNeuron *u, const TIncludeOption option)
-{
-        _include_base_unit( u);
-
-        u->idx = _var_cnt;
-        _var_cnt += u->v_no();
-
-        hosted_neurons.push_back( u);
-
-        // if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
-        //         spikelogging_neurons.push_back( u);
-
-        if ( u->is_conscious() )
-                conscious_neurons.push_back( u);
-
-        if ( option == TIncludeOption::is_last )
-                finalize_additions();
-
-        return 0;
-}
-
-int
-cnrun::CModel::
-include_unit( C_HostedSynapse *u, const TIncludeOption option)
-{
-        _include_base_unit( u);
-
-        u->idx = _var_cnt;
-        _var_cnt += u->v_no();
-
-        hosted_synapses.push_back( u);
-
-        if ( u->traits() & UT_MULTIPLEXING )
-                multiplexing_synapses.push_back( u);
-
-        if ( option == TIncludeOption::is_last )
-                finalize_additions();
-
-        return 0;
-}
-
-
-
-int
-cnrun::CModel::
-include_unit( C_StandaloneNeuron *u)
-{
-        _include_base_unit( u);
-
-        // if ( u->_spikelogger_agent  &&  !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
-        //         spikelogging_neurons.push_back( u);
-
-        if ( u->is_conscious() )
-                conscious_neurons.push_back( u);
-
-        if ( u->is_ddtbound() )
-                ddtbound_neurons.push_back( u);
-        else
-                standalone_neurons.push_back( u);
-
-        return 0;
-}
-
-
-int
-cnrun::CModel::
-include_unit( C_StandaloneSynapse *u)
-{
-/*
-        if ( _check_new_synapse( u) ) {
-//                u->enable( false);
-                u->M = nullptr;
-                return -1;
-        }
-*/
-        _include_base_unit( u);
-
-        if ( u->is_ddtbound() )
-                ddtbound_synapses.push_back( u);
-        else
-                standalone_synapses.push_back( u);
-
-        if ( u->traits() & UT_MULTIPLEXING )
-                multiplexing_synapses.push_back( u);
-
-        return 0;
-}
-
-
-
-// preserve the unit if !do_delete, so it can be re-included again
-cnrun::C_BaseUnit*
-cnrun::CModel::
-exclude_unit( C_BaseUnit *u, const TExcludeOption option)
-{
-        vp( 5, stderr, "-excluding unit \"%s\"", u->_label);
-
-        if ( u->has_sources() )
-                unregister_unit_with_sources( u);
-
-        if ( u->is_listening() )
-                u->stop_listening();  // also calls unregister_listener
-
-        if ( u->is_synapse() && u->traits() & UT_MULTIPLEXING )
-                multiplexing_synapses.erase( find( multiplexing_synapses.begin(), multiplexing_synapses.end(), u));
-
-        if ( u->is_conscious() )
-                conscious_neurons.erase(
-                        find( conscious_neurons.begin(), conscious_neurons.end(),
-                              u));
-
-        if ( u->is_hostable() ) {
-                size_t  our_idx;
-                if ( u->is_neuron() ) {
-                        hosted_neurons.erase( find( hosted_neurons.begin(), hosted_neurons.end(), u));
-                        our_idx = ((C_HostedNeuron*)u) -> idx;
-                } else {
-                        hosted_synapses.erase( find( hosted_synapses.begin(), hosted_synapses.end(), u));
-                        our_idx = ((C_HostedSynapse*)u) -> idx;
-                }
-
-              // shrink V
-                vp( 5, stderr, " (shrink V by %d)", u->v_no());
-                for ( auto& N : hosted_neurons )
-                        if ( N->idx > our_idx )
-                                N->idx -= u->v_no();
-                for ( auto& Y : hosted_synapses )
-                        if ( Y->idx > our_idx )
-                                Y->idx -= u->v_no();
-                memmove( &V[our_idx], &V[our_idx+u->v_no()],
-                         (_var_cnt - our_idx - u->v_no()) * sizeof(double));
-                V.resize( _var_cnt -= u->v_no());
-        }
-
-        if ( u->is_ddtbound() ) {
-                if ( u->is_neuron() )
-                        ddtbound_neurons.erase( find( ddtbound_neurons.begin(), ddtbound_neurons.end(), u));
-                else
-                        ddtbound_synapses.erase( find( ddtbound_synapses.begin(), ddtbound_synapses.end(), u));
-        }
-
-        if ( !u->is_hostable() ) {
-                if ( u->is_neuron() )
-                        standalone_neurons.remove(
-                                static_cast<C_StandaloneNeuron*>(u));
-                else
-                        standalone_synapses.remove(
-                                static_cast<C_StandaloneSynapse*>(u));
-        }
-
-        units.remove( u);
-
-        if ( option == TExcludeOption::with_delete ) {
-                delete u;
-                u = nullptr;
-        } else
-                u->M = nullptr;
-
-        vp( 5, stderr, ".\n");
-        return u;
-}
-
-
-
-
-
-
-
-// listeners & spikeloggers
-
-void
-cnrun::CModel::
-register_listener( C_BaseUnit *u)
-{
-        if ( not count( listening_units.begin(), listening_units.end(), u) )
-                listening_units.push_back( u);
-}
-
-void
-cnrun::CModel::
-unregister_listener( C_BaseUnit *u)
-{
-        listening_units.remove( u);
-}
-
-
-
-
-
-
-
-void
-cnrun::CModel::
-register_spikelogger( C_BaseNeuron *n)
-{
-        spikelogging_neurons.push_back( n);
-        spikelogging_neurons.sort();
-        spikelogging_neurons.unique();
-}
-
-void
-cnrun::CModel::
-unregister_spikelogger( C_BaseNeuron *n)
-{
-        spikelogging_neurons.remove(
-                static_cast<decltype(spikelogging_neurons)::value_type>(n));
-}
-
-
-
-
-
-// units with sources
-
-void
-cnrun::CModel::
-register_unit_with_sources( C_BaseUnit *u)
-{
-        for ( auto& I : u->_sources )
-                if ( I.source->is_periodic() )
-                        units_with_periodic_sources.push_back( u);
-                else
-                        units_with_continuous_sources.push_back( u);
-        units_with_continuous_sources.unique();
-        units_with_periodic_sources.unique();
-}
-
-void
-cnrun::CModel::
-unregister_unit_with_sources( C_BaseUnit *u)
-{
-        units_with_continuous_sources.remove(
-                static_cast<decltype(units_with_continuous_sources)::value_type>(u));
-        units_with_periodic_sources.remove(
-                static_cast<decltype(units_with_periodic_sources)::value_type>(u));
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-add_neuron_species( const string& type_s, const string& label,
-                    const TIncludeOption include_option,
-                    const double x, const double y, const double z)
-{
-        TUnitType t = unit_species_by_string( type_s);
-        if ( unlikely (t == NT_VOID || !unit_species_is_neuron(type_s)) ) {
-                fprintf( stderr, "Unrecognised neuron species: \"%s\"\n", type_s.c_str());
-                return nullptr;
-        } else
-                return add_neuron_species( t, label, include_option, x, y, z);
-}
-
-cnrun::C_BaseNeuron*
-cnrun::CModel::
-add_neuron_species( TUnitType type, const string& label,
-                    const TIncludeOption include_option,
-                    double x, double y, double z)
-{
-        C_BaseNeuron *n;
-        switch ( type ) {
-        case NT_HH_D:
-                n = new CNeuronHH_d( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-        case NT_HH_R:
-                n = new CNeuronHH_r( label, x, y, z, this, CN_UOWNED);
-            break;
-
-        case NT_HH2_D:
-                n = new CNeuronHH2_d( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-        // case NT_HH2_R:
-        //         n = new CNeuronHH2_r( label, x, y, z, this, CN_UOWNED, include_option);
-        //     break;
-//#ifdef CN_WANT_MORE_NEURONS
-        case NT_EC_D:
-                n = new CNeuronEC_d( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-        case NT_ECA_D:
-                n = new CNeuronECA_d( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-/*
-        case NT_LV:
-                n = new COscillatorLV( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
- */
-        case NT_COLPITTS:
-                n = new COscillatorColpitts( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-        case NT_VDPOL:
-                n = new COscillatorVdPol( label, x, y, z, this, CN_UOWNED, include_option);
-            break;
-//#endif
-        case NT_DOTPOISSON:
-                n = new COscillatorDotPoisson( label, x, y, z, this, CN_UOWNED);
-            break;
-        case NT_POISSON:
-                n = new COscillatorPoisson( label, x, y, z, this, CN_UOWNED);
-            break;
-
-        case NT_DOTPULSE:
-                n = new CNeuronDotPulse( label, x, y, z, this, CN_UOWNED);
-            break;
-
-        case NT_MAP:
-                n = new CNeuronMap( label, x, y, z, this, CN_UOWNED);
-            break;
-
-        default:
-                return nullptr;
-        }
-        if ( n && n->_status & CN_UERROR ) {
-                delete n;
-                return nullptr;
-        }
-        return n;
-}
-
-
-
-
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-add_synapse_species( const string& type_s,
-                     const string& src_l, const string& tgt_l,
-                     const double g,
-                     const TSynapseCloningOption cloning_option,
-                     const TIncludeOption include_option)
-{
-        TUnitType ytype = unit_species_by_string( type_s);
-        bool    given_species = true;
-        if ( ytype == NT_VOID && (given_species = false, ytype = unit_family_by_string( type_s)) == NT_VOID ) {
-                vp( 0, stderr, "Unrecognised synapse species or family: \"%s\"\n", type_s.c_str());
-                return nullptr;
-        }
-
-        C_BaseNeuron
-                *src = neuron_by_label( src_l),
-                *tgt = neuron_by_label( tgt_l);
-        if ( !src || !tgt ) {
-                vp( 0, stderr, "Phony source (\"%s\") or target (\"%s\")\n", src_l.c_str(), tgt_l.c_str());
-                return nullptr;
-        }
-
-        if ( given_species )  // let lower function do the checking
-                return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
-
-        switch ( ytype ) {
-      // catch by first entry in __CNUDT, assign proper species per source and target traits
-        case YT_AB_DD:
-                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-                        ytype = YT_AB_RR;
-                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-                        ytype = YT_AB_RD;
-                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXAB_DR;
-                        else
-                                ytype = YT_AB_DR;
-                else
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXAB_DD;
-                        else
-                                ytype = YT_AB_DD;
-            break;
-
-        case YT_ABMINUS_DD:
-                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-                        ytype = YT_ABMINUS_RR;
-                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-                        ytype = YT_ABMINUS_RD;
-                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXABMINUS_DR;
-                        else
-                                ytype = YT_ABMINUS_DR;
-                else
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXABMINUS_DD;
-                        else
-                                ytype = YT_ABMINUS_DD;
-            break;
-
-        case YT_RALL_DD:
-                if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
-                        ytype = YT_RALL_RR;
-                else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
-                        ytype = YT_RALL_RD;
-                else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXRALL_DR;
-                        else
-                                ytype = YT_RALL_DR;
-                else
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXRALL_DD;
-                        else
-                                ytype = YT_RALL_DD;
-            break;
-
-        case YT_MAP:
-                if ( src->traits() & UT_DDTSET)
-                        if ( src->traits() & UT_DOT )
-                                ytype = YT_MXMAP;
-                        else
-                                ytype = YT_MAP;
-                else {
-                        vp( 0, stderr, "Map synapses can only connect Map neurons\n");
-                        return nullptr;
-                }
-            break;
-        default:
-                vp( 0, stderr, "Bad synapse type: %s\n", type_s.c_str());
-                return nullptr;
-        }
-
-        return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
-}
-
-
-
-
-cnrun::C_BaseSynapse*
-cnrun::CModel::
-add_synapse_species( TUnitType ytype,
-                     C_BaseNeuron *src, C_BaseNeuron *tgt,
-                     double g,
-                     TSynapseCloningOption cloning_option, TIncludeOption include_option)
-{
-        vp( 5, "add_synapse_species( \"%s\", \"%s\", \"%s\", %g, %d, %d)\n",
-            __CNUDT[ytype].species, src->_label, tgt->_label, g, cloning_option, include_option);
-
-        C_BaseSynapse *y = nullptr;
-
-      // consider cloning
-        if ( cloning_option == TSynapseCloningOption::yes && src->_axonal_harbour.size() )
-                for ( auto& L : src->_axonal_harbour )
-                        if ( L->_type == ytype &&
-                             L->is_not_altered() )
-                                return L->clone_to_target( tgt, g);
-
-        switch ( ytype ) {
-      // the __CNUDT entry at first TUnitType element whose
-      // 'name' matches the type id supplied, captures all cases for a given synapse family
-        case YT_AB_RR:
-                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        y = new CSynapseAB_rr( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-        case YT_AB_RD:
-                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseAB_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "AB_rd not implemented";
-            break;
-        case YT_AB_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseAB_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "AB_dr not implemented";
-            break;
-        case YT_AB_DD:
-                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        y = new CSynapseAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-        case YT_MXAB_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-                        y = new CSynapseMxAB_dr( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-        case YT_MXAB_DD:
-                if (  !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-                        y = new CSynapseMxAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-
-
-        case YT_ABMINUS_RR:
-                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseABMINUS_rr( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "ABMINUS_rr not implemented";
-            break;
-        case YT_ABMINUS_RD:
-                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseABMINUS_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "ABMINUS_rd not implemented";
-            break;
-        case YT_ABMINUS_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseABMINUS_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "ABMINUS_dr not implemented";
-            break;
-        case YT_ABMINUS_DD:
-                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        y = new CSynapseABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-        case YT_MXABMINUS_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-                        // y = new CSynapseMxABMinus_dr( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "MxABMinus_dr not implemented";
-            break;
-        case YT_MXABMINUS_DD:
-                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-                        // y = new CSynapseMxABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "MxABMinus_dd not implemented";
-            break;
-
-
-        case YT_RALL_RR:
-                if (  src->traits() & UT_RATEBASED &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseRall_rr( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "Rall_rr not implemented";
-            break;
-        case YT_RALL_RD:
-                if (  src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseRall_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "Rall_rd not implemented";
-            break;
-        case YT_RALL_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
-                        // y = new CSynapseRall_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
-                        throw "Rall_dr not implemented";
-            break;
-        case YT_RALL_DD:
-                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
-                        y = new CSynapseRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
-            break;
-        case YT_MXRALL_DR:
-                if ( !(src->traits() & UT_RATEBASED) &&  tgt->traits() & UT_RATEBASED &&  src->traits() & UT_DOT )
-                        // y = new CSynapseMxRall_dr( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "MxRall_dr not implemented";
-            break;
-        case YT_MXRALL_DD:
-                if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) &&  src->traits() & UT_DOT )
-                        // y = new CSynapseMxRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
-                        throw "MxRall_dd not implemented";
-            break;
-
-
-        case YT_MAP:
-                if ( src->traits() & UT_DDTSET)
-                        if ( src->traits() & UT_DOT )
-                                y = new CSynapseMxMap( src, tgt, g, this, CN_UOWNED);
-                        else
-                                y = new CSynapseMap( src, tgt, g, this, CN_UOWNED);
-                else
-                        throw "Map synapses can only connect Map neurons";
-            break;
-
-        default:
-                return nullptr;
-        }
-
-        if ( !y || y->_status & CN_UERROR ) {
-                if ( y )
-                        delete y;
-                return nullptr;
-        }
-
-        vp( 5, "new synapse \"%s->%s\"\n", y->_label, tgt->label());
-        y->set_g_on_target( *tgt, g);
-
-        return y;
-}
-
-
-void
-cnrun::CModel::
-finalize_additions()
-{
-        V.resize( _var_cnt);
-        W.resize( _var_cnt);
-
-        for ( auto& U : hosted_neurons )
-                U->reset_vars();
-        for ( auto& U : hosted_synapses )
-                U->reset_vars();
-
-        // if ( options.sort_units ) {
-        //         units.sort(
-        //                 [] (C_BaseUnit *&lv, C_BaseUnit *&rv) {
-        //                         return strcmp( lv->label(), rv->label()) < 0;
-        //                 });
-        // }
-
-        _integrator->prepare();
-}
-
-
-void
-cnrun::CModel::
-cull_deaf_synapses()
-{
-      // 1. Need to traverse synapses backwards due to shifts its
-      //    vector will undergo on element deletions;
-      // 2. Omit those with a param reader, scheduler or range, but
-      //    only if it is connected to parameter "gsyn"
-        auto Yi = hosted_synapses.rbegin();
-        while ( Yi != hosted_synapses.rend() ) {
-                auto& Y = **Yi;
-                if ( Y.has_sources() )
-                        continue;
-                auto Ti = Y._targets.begin();
-                while ( Ti != Y._targets.end() ) {
-                        auto& T = **Ti;
-                        if ( Y.g_on_target( T) == 0  ) {
-                                vp( 3, stderr, " (deleting dendrite to \"%s\" of a synapse \"%s\" with gsyn == 0)\n",
-                                    T._label, Y._label);
-                                T._dendrites.erase( &Y);
-                                ++Ti;
-                                Y._targets.erase( prev(Ti));
-
-                                snprintf( Y._label, C_BaseUnit::max_label_size-1,
-                                          "%s:%zu", Y._source->_label, Y._targets.size());
-                        }
-                }
-                ++Yi;
-                if ( (*prev(Yi))->_targets.empty() )
-                        delete *prev(Yi);
-        }
-
-        // older stuff
-/*
-        for_all_synapses_reversed (Y) {
-                int gsyn_pidx = (*Y) -> param_idx_by_sym( "gsyn");
-                if ( ((*Y)->param_schedulers && device_list_concerns_parm( (*Y)->param_schedulers, gsyn_pidx)) ||
-                     ((*Y)->param_readers    && device_list_concerns_parm( (*Y)->param_readers,    gsyn_pidx)) ||
-                     ((*Y)->param_ranges     && device_list_concerns_parm( (*Y)->param_ranges,     gsyn_pidx)) ) {
-                        vp( 2, " (preserving doped synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
-                        continue;
-                }
-                if ( gsyn_pidx > -1 && (*Y)->param_value( gsyn_pidx) == 0. ) {
-                        vp( 2, " (deleting synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
-                        delete (*Y);
-                        ++cnt;
-                }
-        }
-        if ( cnt )
-                vp( 0, "Deleted %zd deaf synapses\n", cnt);
-*/
-}
-
-
-
-// needs to be called after a neuron is put out
-void
-cnrun::CModel::
-cull_blind_synapses()
-{
-        auto Yi = hosted_synapses.begin();
-        // units remove themselves from all lists, including the one
-        // iterated here
-        while ( Yi != hosted_synapses.end() ) {
-                auto& Y = **Yi;
-                if ( !Y._source && !Y.has_sources() ) {
-                        vp( 3, " (deleting synapse with NULL source: \"%s\")\n", Y._label);
-                        delete &Y;  // units are smart, self-erase
-                                    // themselves from the list we are
-                                    // iterating over here
-                } else
-                        ++Yi;
-        }
-        auto Zi = standalone_synapses.begin();
-        while ( Zi != standalone_synapses.end() ) {
-                auto& Y = **Zi;
-                if ( !Y._source && !Y.has_sources() ) {
-                        vp( 3, " (deleting synapse with NULL source: \"%s\")\n", Y._label);
-                        delete &Y;
-                } else
-                        ++Zi;
-        }
-}
-
-
-void
-cnrun::CModel::
-reset_state_all_units()
-{
-        for ( auto& U : units )
-                U -> reset_state();
-}
-
-
-
-
-void
-cnrun::CModel::
-coalesce_synapses()
-{
-startover:
-        for ( auto& U1i : units ) {
-                if ( not U1i->is_synapse() )
-                        continue;
-                auto& U1 = *static_cast<C_BaseSynapse*>(U1i);
-                for ( auto& U2i : units ) {
-                        auto& U2 = *static_cast<C_BaseSynapse*>(U2i);
-                        if ( &U2 == &U1 )
-                                continue;
-
-                        if ( U1._source == U2._source && U1.is_identical( U2) ) {
-                                vp( 5, "coalescing \"%s\" and \"%s\"\n", U1.label(), U2.label());
-                                for ( auto& T : U2._targets ) {
-                                        U1._targets.push_back( T);
-                                        T->_dendrites[&U1] = T->_dendrites[&U2];
-                                }
-                                snprintf( U1._label, C_BaseUnit::max_label_size-1,
-                                          "%s:%zu", U1._source->label(), U1._targets.size());
-
-                                delete &U2;
-
-                                goto startover;  // because we have messed with both iterators
-                        }
-                }
-        }
-}
-
-
-
-
-
-inline const char*
-__attribute__ ((pure))
-pl_ending( size_t cnt)
-{
-        return cnt == 1 ? "" : "s";
-}
-
-void
-cnrun::CModel::
-dump_metrics( FILE *strm) const
-{
-        fprintf( strm,
-                 "\nModel \"%s\"%s:\n"
-                 "  %5zd unit%s total (%zd Neuron%s, %zd Synapse%s):\n"
-                 "    %5zd hosted,\n"
-                 "    %5zd standalone\n"
-                 "    %5zd discrete dt-bound\n"
-                 "  %5zd Listening unit%s\n"
-                 "  %5zd Spikelogging neuron%s\n"
-                 "  %5zd Unit%s being tuned continuously\n"
-                 "  %5zd Unit%s being tuned periodically\n"
-                 "  %5zd Spontaneously firing neuron%s\n"
-                 "  %5zd Multiplexing synapse%s\n"
-                 " %6zd vars on integration vector\n\n",
-                 name.c_str(), is_diskless ? " (diskless)" : "",
-                 units.size(), pl_ending(units.size()),
-                 n_total_neurons(), pl_ending(n_total_neurons()),
-                 n_total_synapses(), pl_ending(n_total_synapses()),
-                 n_hosted_units(),
-                 n_standalone_units(),
-                 ddtbound_neurons.size() + ddtbound_synapses.size(),
-                 listening_units.size(), pl_ending(listening_units.size()),
-                 spikelogging_neurons.size(), pl_ending(spikelogging_neurons.size()),
-                 units_with_continuous_sources.size(), pl_ending(units_with_continuous_sources.size()),
-                 units_with_periodic_sources.size(), pl_ending(units_with_periodic_sources.size()),
-                 conscious_neurons.size(), pl_ending(conscious_neurons.size()),
-                 multiplexing_synapses.size(), pl_ending(multiplexing_synapses.size()),
-                 _var_cnt-1);
-        if ( have_ddtb_units )
-                fprintf( strm, "Discrete dt: %g msec\n", discrete_dt());
-}
-
-void
-cnrun::CModel::
-dump_state( FILE *strm) const
-{
-        fprintf( strm,
-                 "Model time: %g msec\n"
-                 "Integrator dt_min: %g msec, dt_max: %g msec\n"
-                 "Logging at: %g msec\n\n",
-                 model_time(),
-                 dt_min(), dt_max(),
-                 options.listen_dt);
-}
-
-
-
-void
-cnrun::CModel::
-dump_units( FILE *strm) const
-{
-        fprintf( strm, "\nUnit types in the model:\n");
-
-        set<int> found_unit_types;
-        unsigned p = 0;
-
-        fprintf( strm, "\n===== Neurons:\n");
-        for ( auto& U : units )
-                if ( U->is_neuron() && found_unit_types.count( U->type()) == 0 ) {
-                        found_unit_types.insert( U->type());
-
-                        fprintf( strm, "--- %s: %s\nParameters: ---\n",
-                                 U->species(), U->type_description());
-                        for ( p = 0; p < U->p_no(); ++p )
-                                if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
-                                        fprintf( strm, " %-12s %s %s\n",
-                                                 U->param_sym(p),
-                                                 double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
-                                                 U->param_name(p));
-                        fprintf( strm, "Variables: ---\n");
-                        for ( p = 0; p < U->v_no(); ++p )
-                                if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
-                                        fprintf( strm, "%-12s\t= %s %s\n",
-                                                 U->var_sym(p),
-                                                 double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
-                                                 U->var_name(p));
-                }
-        fprintf( strm, "\n===== Synapses:\n");
-        for ( auto& U : units )
-                if ( U->is_synapse() && found_unit_types.count( U->type()) == 0 ) {
-                        found_unit_types.insert( U->type());
-
-                        fprintf( strm, "--- %s: %s\nParameters: ---\n",
-                                 U->species(), U->type_description());
-                        fprintf( strm, "    parameters:\n");
-                        for ( p = 0; p < U->p_no(); ++p )
-                                if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
-                                        fprintf( strm, "%-12s\t= %s %s\n",
-                                                 U->param_sym(p),
-                                                 double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
-                                                 U->param_name(p));
-                        fprintf( strm, "Variables: ---\n");
-                        for ( p = 0; p < U->v_no(); ++p )
-                                if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
-                                        fprintf( strm, "%-12s\t= %s %s\n",
-                                                 U->var_sym(p),
-                                                 double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
-                                                 U->var_name(p));
-
-                }
-        fprintf( strm, "\n");
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/model-tags.cc b/upstream/src/libcnrun/model-tags.cc
deleted file mode 100644 (file)
index 847529d..0000000
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- *       File name:  libcn/mmodel-tags.cc
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- *                   building on original work by Thomas Nowotny
- * Initial version:  2014-09-25
- *
- *         Purpose:  CModel household (process_*_tags(), and other methods using regexes).
- *
- *         License:  GPL-2+
- */
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <regex.h>
-
-#include "libstilton/string.hh"
-#include "model.hh"
-
-
-using namespace std;
-
-vector<cnrun::C_BaseUnit*>
-cnrun::CModel::
-list_units( const string& label) const
-{
-        vector<C_BaseUnit*> Q;
-
-        regex_t RE;
-        if ( 0 != regcomp( &RE, label.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                vp( 0, stderr, "Invalid regexp in list_units: \"%s\"\n", label.c_str());
-                return move(Q);
-        }
-
-        for ( auto& U : units )
-                if ( regexec( &RE, U->label(), 0, 0, 0) != REG_NOMATCH )
-                        Q.push_back(U);
-
-        return move(Q);
-}
-
-
-// tags
-
-size_t
-cnrun::CModel::
-process_listener_tags( const list<STagGroupListener> &Listeners)
-{
-        size_t count = 0;
-        regex_t RE;
-        for ( auto& P : Listeners ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0, stderr, "Invalid regexp in process_listener_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-                for ( auto& Ui : units ) {
-                        auto& U = *Ui;
-                        if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
-                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
-                                        U.start_listening( P.bits);
-                                        vp( 3, " (unit \"%s\" listening%s)\n",
-                                            U._label, P.bits & CN_ULISTENING_1VARONLY ? ", to one var only" :"");
-                                } else {
-                                        U.stop_listening();
-                                        vp( 3, " (unit \"%s\" not listening)\n", U._label);
-                                }
-                                ++count;
-                        }
-                }
-        }
-
-        return count;
-}
-
-
-size_t
-cnrun::CModel::
-process_spikelogger_tags( const list<STagGroupSpikelogger> &Spikeloggers)
-{
-        size_t count = 0;
-        regex_t RE;
-        for ( auto& P : Spikeloggers ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0, stderr, "Invalid regexp in process_spikelogger_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-                for ( auto& Ni : standalone_neurons ) {
-                        auto& N = *Ni;
-                        if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
-                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
-                                        bool log_sdf = !(P.period == 0. || P.sigma == 0.);
-                                        if ( ( log_sdf && !N.enable_spikelogging_service(
-                                                       P.period, P.sigma, P.from))
-                                             or
-                                             (!log_sdf && !N.enable_spikelogging_service()) ) {
-                                                vp( 0, stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
-                                                         N._label, N.species());
-                                                continue;
-                                        }
-                                } else
-                                        N.disable_spikelogging_service();
-                                ++count;
-
-                                vp( 3, " (%sabling spike logging for standalone neuron \"%s\")\n",
-                                    (P.invert_option == STagGroup::TInvertOption::no) ? "en" : "dis", N._label);
-                        }
-                }
-                for ( auto& Ni : hosted_neurons ) {
-                        auto& N = *Ni;
-                        if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
-                                if ( P.invert_option == STagGroup::TInvertOption::no ) {
-                                        bool log_sdf = !(P.period == 0. || P.sigma == 0.);
-                                        if ( ( log_sdf && !N.enable_spikelogging_service( P.period, P.sigma, P.from))
-                                             or
-                                             (!log_sdf && !N.enable_spikelogging_service()) ) {
-                                                vp( 1, stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
-                                                    N._label, N.species());
-                                                return -1;
-                                        }
-                                } else
-                                        N.disable_spikelogging_service();
-                                ++count;
-
-                                vp( 3, " (%sabling spike logging for hosted neuron \"%s\")\n",
-                                    (P.invert_option == STagGroup::TInvertOption::no) ? "en" : "dis", N._label);
-                        }
-                }
-        }
-
-        return count;
-}
-
-
-size_t
-cnrun::CModel::
-process_putout_tags( const list<STagGroup> &ToRemove)
-{
-        size_t count = 0;
-      // execute some
-        regex_t RE;
-        for ( auto& P : ToRemove ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0,  stderr, "Invalid regexp in process_putout_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-                auto Ui = units.rbegin();
-                while ( Ui != units.rend() ) {
-                        ++Ui;
-                        auto& U = **prev(Ui);
-                        if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
-                                vp( 2, " (put out unit \"%s\")\n", U._label);
-                                delete &U;
-                                ++count;
-                        }
-                }
-        }
-
-        cull_blind_synapses();
-
-        return count;
-}
-
-
-size_t
-cnrun::CModel::
-process_decimate_tags( const list<STagGroupDecimate> &ToDecimate)
-{
-        size_t count = 0;
-      // decimate others
-        regex_t RE;
-        for ( auto& P : ToDecimate ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0, stderr, "Invalid regexp in process_decimate_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-
-              // collect group
-                vector<C_BaseUnit*> dcmgroup;
-                for ( auto& U : units )
-                        if ( regexec( &RE, U->_label, 0, 0, 0) == 0 )
-                                dcmgroup.push_back( U);
-                random_shuffle( dcmgroup.begin(), dcmgroup.end());
-
-              // execute
-                size_t  to_execute = rint( dcmgroup.size() * P.fraction), n = to_execute;
-                while ( n-- ) {
-                        delete dcmgroup[n];
-                        ++count;
-                }
-
-                vp( 3, " (decimated %4.1f%% (%zu units) of %s)\n",
-                    P.fraction*100, to_execute, P.pattern.c_str());
-
-        }
-
-        cull_blind_synapses();
-
-        return count;
-}
-
-
-
-
-
-
-size_t
-cnrun::CModel::
-process_paramset_static_tags( const list<STagGroupNeuronParmSet> &tags)
-{
-        size_t count = 0;
-        regex_t RE;
-        for ( auto& P : tags ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-
-                vector<string> current_tag_assigned_labels;
-
-                for ( auto& Ui : units ) {
-                        if ( not Ui->is_neuron() )
-                                continue;
-                        auto& N = *static_cast<C_BaseNeuron*>(Ui);
-                        if ( regexec( &RE, N.label(), 0, 0, 0) == REG_NOMATCH )
-                                continue;
-                      // because a named parameter can map to a different param_id in different units, rather
-                      // do lookup every time
-
-                        int p_d = -1;
-                        C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-                        if ( (p_d = N.param_idx_by_sym( P.parm)) != -1 )
-                                kind = C_BaseUnit::SINK_PARAM;
-                        else if ( (p_d = N.var_idx_by_sym( P.parm)) != -1 )
-                                kind = C_BaseUnit::SINK_VAR;
-                        if ( p_d == -1 ) {
-                                vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-                                    N.class_name(), N.label(), N.species(), P.parm.c_str());
-                                continue;
-                        }
-
-                        switch ( kind ) {
-                        case C_BaseUnit::SINK_PARAM:
-                                N.param_value(p_d) = (P.invert_option == STagGroup::TInvertOption::no)
-                                        ? P.value : __CNUDT[N.type()].stock_param_values[p_d];
-                                N.param_changed_hook();
-                            break;
-                        case C_BaseUnit::SINK_VAR:
-                                N.var_value(p_d) = P.value;
-                            break;
-                        }
-                        ++count;
-
-                        current_tag_assigned_labels.push_back( N.label());
-                }
-
-                if ( current_tag_assigned_labels.empty() ) {
-                        vp( 1,  stderr, "No neuron labelled matching \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-
-                vp( 3, " set [%s]{%s} = %g\n",
-                    stilton::str::join(current_tag_assigned_labels, ", ").c_str(),
-                    P.parm.c_str(), P.value);
-        }
-
-        return count;
-}
-
-
-
-
-
-size_t
-cnrun::CModel::
-process_paramset_static_tags( const list<STagGroupSynapseParmSet> &tags)
-{
-        size_t count = 0;
-        auto process_tag = [&] (const STagGroupSynapseParmSet& P,
-                                regex_t& REsrc, regex_t& REtgt) -> void {
-                vector<string> current_tag_assigned_labels;
-
-                bool do_gsyn = (P.parm == "gsyn");
-
-                vp( 5, "== setting %s -> %s {%s} = %g...\n",
-                    P.pattern.c_str(), P.target.c_str(), P.parm.c_str(), P.value);
-
-                for ( auto& Uai : units ) {
-                        if ( not Uai->is_neuron() )
-                                continue;
-                        if ( regexec( &REsrc, Uai->label(), 0, 0, 0) == REG_NOMATCH )
-                                continue;
-                        auto& Ua = *static_cast<C_BaseNeuron*>(Uai);
-
-                        for ( auto& Ubi : units ) {
-                                if ( not Ubi->is_neuron() )
-                                        continue;
-                                if ( regexec( &REtgt, Ubi->label(), 0, 0, 0) == REG_NOMATCH ) /* || Ua == Ub */
-                                        continue;
-                                auto& Ub = *static_cast<C_BaseNeuron*>(Ubi);
-                                auto y = Ua.connects_via(Ub);
-                                if ( !y )
-                                        continue;
-
-                                if ( do_gsyn ) {
-                                        y->set_g_on_target( Ub, P.value);
-                                        current_tag_assigned_labels.push_back( y->label());
-                                        continue;
-                                }
-
-                                int p_d = -1;
-                                C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-                                if ( (p_d = y->param_idx_by_sym( P.parm)) > -1 )
-                                        kind = C_BaseUnit::SINK_PARAM;
-                                else if ( (p_d = y->var_idx_by_sym( P.parm)) > -1 )
-                                        kind = C_BaseUnit::SINK_VAR;
-                                if ( p_d == -1 ) {
-                                        vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-                                            y->class_name(), y->label(), y->species(), P.parm.c_str());
-                                        continue;
-                                }
-
-                                switch ( kind ) {
-                                case C_BaseUnit::SINK_PARAM:
-                                        if ( y->_targets.size() > 1 )
-                                                y = y->make_clone_independent(
-                                                        &Ub);  // lest brethren synapses to other targets be clobbered
-                                        y->param_value(p_d) = (P.invert_option == STagGroup::TInvertOption::no)
-                                                ? P.value : __CNUDT[y->type()].stock_param_values[p_d];
-                                        y->param_changed_hook();
-                                    break;
-                                case C_BaseUnit::SINK_VAR:
-                                        y->var_value(p_d) = P.value;
-                                    break;
-                                }
-                                ++count;
-
-                                current_tag_assigned_labels.push_back( y->label());
-                        }
-                }
-                if ( current_tag_assigned_labels.empty() ) {
-                        vp( 1, stderr, "No synapse connecting any of \"%s\" to \"%s\"\n", P.pattern.c_str(), P.target.c_str());
-                        return;
-                }
-
-                vp( 3, " set [%s]{%s} = %g\n",
-                    stilton::str::join(current_tag_assigned_labels, ", ").c_str(),
-                    P.parm.c_str(), P.value);
-        };
-
-        for ( auto& P : tags ) {
-                regex_t REsrc, REtgt;
-                if (0 != regcomp( &REsrc, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB) ) {  // P->pattern acting as src
-                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags (src): \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-                if (0 != regcomp( &REtgt, P.target.c_str(), REG_EXTENDED | REG_NOSUB) ) {
-                        vp( 0, stderr, "Invalid regexp in process_paramset_static_tags (tgt): \"%s\"\n", P.target.c_str());
-                        continue;
-                }
-
-                process_tag( P, REsrc, REtgt);
-        }
-
-        coalesce_synapses();
-
-        return count;
-}
-
-
-
-size_t
-cnrun::CModel::
-process_paramset_source_tags( const list<STagGroupSource> &tags)
-{
-        size_t count = 0;
-        regex_t RE;
-        for ( auto& P : tags ) {
-                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
-                        vp( 0, stderr, "Invalid regexp in process_paramset_source_tags: \"%s\"\n", P.pattern.c_str());
-                        continue;
-                }
-
-                for ( auto& U : units ) {
-                        if ( regexec( &RE, U->label(), 0, 0, 0) == REG_NOMATCH )
-                                continue;
-
-                        int p_d = -1;
-                        C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
-                        if ( (p_d = U->param_idx_by_sym( P.parm)) > -1 )
-                                kind = C_BaseUnit::SINK_PARAM;
-                        else if ( (p_d = U->var_idx_by_sym( P.parm)) > -1 )
-                                kind = C_BaseUnit::SINK_VAR;
-                        if ( p_d == -1 ) {
-                                vp( 1, stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
-                                    U->class_name(), U->label(), U->species(), P.parm.c_str());
-                                continue;
-                        }
-
-                        if ( P.invert_option == STagGroup::TInvertOption::no ) {
-                                U -> attach_source( P.source, kind, p_d);
-                                vp( 3, "Connected source \"%s\" to \"%s\"{%s}\n",
-                                    P.source->name(), U->label(), P.parm.c_str());
-                        } else {
-                                U -> detach_source( P.source, kind, p_d);
-                                vp( 3, "Disconnected source \"%s\" from \"%s\"{%s}\n",
-                                    P.source->name(), U->label(), P.parm.c_str());
-                        }
-                        ++count;
-                }
-        }
-
-        return count;
-}
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/model.hh b/upstream/src/libcnrun/model.hh
deleted file mode 100644 (file)
index a0468ea..0000000
+++ /dev/null
@@ -1,715 +0,0 @@
-/*
- *       File name:  libcn/model.hh
- *         Project:  cnrun
- *          Author:  Andrei Zavada <johnhommer@gmail.com>
- * Initial version:  2008-09-02
- *
- *         Purpose:  Main model class.
- *
- *         License:  GPL-2+
- */
-
-/*--------------------------------------------------------------------------
-
-The wrapper class which takes lists of pointers to neurons and synapses
-which are networked to a neural system and assembles a common state
-vector and handles the derivatives. At the same time it serves the neurons
-and synapses their state at any given time and allows them to adjust their
-parameters.
-
---------------------------------------------------------------------------*/
-
-
-#ifndef CNRUN_LIBCN_MODEL_H_
-#define CNRUN_LIBCN_MODEL_H_
-
-#if HAVE_CONFIG_H && !defined(VERSION)
-#  include "config.h"
-#endif
-
-#include <list>
-#include <vector>
-#include <string>
-
-#include "libxml/parser.h"
-#include "libxml/tree.h"
-
-#include "gsl/gsl_rng.h"
-
-#include "libstilton/misc.hh"
-#include "forward-decls.hh"
-#include "base-neuron.hh"
-#include "base-synapse.hh"
-#include "hosted-neurons.hh"
-#include "hosted-synapses.hh"
-#include "standalone-neurons.hh"
-#include "standalone-synapses.hh"
-#include "integrate-rk65.hh"
-
-
-using namespace std;
-
-namespace cnrun {
-
-struct SModelOptions {
-        bool    listen_1varonly:1,
-                listen_deferwrite:1,
-                listen_binary:1,
-                log_dt:1,
-                log_spikers:1,
-                log_spikers_use_serial_id:1,
-                log_sdf:1,
-                display_progress_percent:1,
-                display_progress_time:1;
-        int     precision;
-        double  spike_threshold,
-                spike_lapse,
-                listen_dt;
-        double  //discrete_dt,
-                integration_dt_max,
-                integration_dt_min,
-                integration_dt_cap;
-        double  sxf_start_delay,
-                sxf_period,
-                sdf_sigma;
-        int     verbosely;
-
-        SModelOptions ()
-              : listen_1varonly (true), listen_deferwrite (false), listen_binary (false),
-                log_dt (false),
-                log_spikers (false), log_spikers_use_serial_id (false),
-                log_sdf (false),
-                display_progress_percent (true),
-                display_progress_time (false),
-                precision (8),
-                spike_threshold (0.), spike_lapse (3.),
-                listen_dt(1.),
-                //discrete_dt(.5),
-                integration_dt_max (.5), integration_dt_min (1e-5), integration_dt_cap (5.),
-                sxf_start_delay (0.), sxf_period (0.), sdf_sigma (0.),
-                verbosely (1)
-                {}
-
-        SModelOptions (const SModelOptions& rv)
-                {
-                        memmove(this, &rv, sizeof(SModelOptions));
-                }
-};
-
-
-class CModel : public cnrun::stilton::C_verprintf {
-
-    public:
-      // ctor, dtor
-        CModel (const string& name, CIntegrate_base*, const SModelOptions&);
-        virtual ~CModel ();
-
-        string  name;
-
-        SModelOptions
-                options;
-
-      // Unit list and lookup
-        vector<C_BaseUnit*>
-        list_units() const
-                {  return move(vector<C_BaseUnit*> (units.begin(), units.end()));  }
-        vector<C_BaseUnit*>
-        list_units( const string& label) const;
-        C_BaseUnit    *unit_by_label( const string&) const;
-        C_BaseNeuron  *neuron_by_label( const string&) const;
-        C_BaseSynapse *synapse_by_label( const string&) const;
-        unsigned short longest_label() const
-                {  return _longest_label;  }
-
-      // Unit tally
-        size_t n_hosted_units() const
-                {  return hosted_neurons.size() + hosted_synapses.size();                }
-        size_t n_standalone_units() const
-                {  return standalone_neurons.size() + standalone_synapses.size();        }
-        size_t n_ddtbound_units() const
-                {  return ddtbound_neurons.size() + ddtbound_synapses.size();            }
-        size_t n_total_neurons() const
-                {
-                        return hosted_neurons.size()
-                                + standalone_neurons.size()
-                                + ddtbound_neurons.size();
-                }
-        size_t n_total_synapses() const
-                {
-                        return hosted_synapses.size()
-                                + standalone_synapses.size()
-                                + ddtbound_synapses.size();
-                }
-
-      // 0. Model composition
-
-      // There are two ways of adding units:
-      // - create units outside, then 'include' them in a model;
-      // - specify which unit you want, by type, and creating
-      //   them directly in the model ('add').
-
-        //enum class TIncludeOption { is_last, is_notlast, };  // defined in hosted-unit.hh
-        // if option == is_last, do allocations of hosted units' vars immediately
-        // otherwise defer until addition is done with option == is_notlast
-        // or the user calls finalize_additions
-        int include_unit( C_HostedNeuron*, TIncludeOption option = TIncludeOption::is_last);
-        int include_unit( C_HostedSynapse*, TIncludeOption option = TIncludeOption::is_last);
-        int include_unit( C_StandaloneNeuron*);
-        int include_unit( C_StandaloneSynapse*);
-
-        C_BaseNeuron*
-        add_neuron_species( TUnitType, const string& label,
-                            TIncludeOption = TIncludeOption::is_last,
-                            double x = 0., double y = 0., double z = 0.);
-        C_BaseNeuron*
-        add_neuron_species( const string& type, const string& label,
-                            TIncludeOption = TIncludeOption::is_last,
-                            double x = 0., double y = 0., double z = 0.);
-
-        enum class TSynapseCloningOption { yes, no, };
-        C_BaseSynapse*
-        add_synapse_species( const string& type, const string& src, const string& tgt,
-                             double g,
-                             TSynapseCloningOption = TSynapseCloningOption::yes,
-                             TIncludeOption = TIncludeOption::is_last);
-        void finalize_additions();
-
-        C_BaseSynapse*
-        add_synapse_species( TUnitType, C_BaseNeuron *src, C_BaseNeuron *tgt,
-                             double g,
-                             TSynapseCloningOption = TSynapseCloningOption::yes,
-                             TIncludeOption = TIncludeOption::is_last);
-
-        enum class TExcludeOption { with_delete, no_delete, };
-        C_BaseUnit*
-        exclude_unit( C_BaseUnit*, TExcludeOption option = TExcludeOption::no_delete);
-        // return nullptr if option == do_delete, the excluded unit otherwise, even if it was not owned
-        void delete_unit( C_BaseUnit* u)
-                {  exclude_unit( u, TExcludeOption::with_delete);  }
-
-      // 1. NeuroMl interface
-        enum class TNMLImportOption { merge, reset, };
-        enum TNMLIOResult {
-                ok = 0, nofile, noelem, badattr, badcelltype, biglabel, structerror,
-        };
-        int import_NetworkML( const string& fname, TNMLImportOption);
-        int import_NetworkML( xmlDoc*, const string& fname, TNMLImportOption);  // fname is merely informational here
-        int export_NetworkML( const string& fname);
-        int export_NetworkML( xmlDoc*);
-
-      // 2. Bulk operations
-        enum class TResetOption { with_params, no_params, };
-        void reset( TResetOption = TResetOption::no_params);
-        void reset_state_all_units();
-
-        void cull_deaf_synapses();  // those with gsyn == 0
-        void cull_blind_synapses(); // those with _source == nullptr
-
-      // 3. Informational
-        size_t vars() const  { return _var_cnt; }
-        void dump_metrics( FILE *strm = stdout) const;
-        void dump_state( FILE *strm = stdout) const;
-        void dump_units( FILE *strm = stdout) const;
-
-      // 4. Set unit parameters
-      // high-level functions to manipulate unit behaviour, set params, & connect sources
-        struct STagGroup {
-                string pattern;
-                enum class TInvertOption { yes, no, };
-                TInvertOption invert_option;
-                STagGroup( const string& a,
-                           TInvertOption b = STagGroup::TInvertOption::no)
-                      : pattern (a), invert_option (b)
-                        {}
-        };
-        struct STagGroupListener : STagGroup {
-                int bits;
-                STagGroupListener( const string& a,
-                                   int c = 0,
-                                   STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
-                      : STagGroup (a, b), bits (c)
-                        {}
-        };
-        size_t process_listener_tags( const list<STagGroupListener>&);
-
-        struct STagGroupSpikelogger : STagGroup {
-                double period, sigma, from;
-                STagGroupSpikelogger( const string& a,
-                                      double c = 0., double d = 0., double e = 0.,  // defaults disable sdf computation
-                                      STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
-                      : STagGroup (a, b), period (c), sigma (d), from (e)
-                        {}
-        };
-        size_t process_spikelogger_tags( const list<STagGroupSpikelogger>&);
-        size_t process_putout_tags( const list<STagGroup>&);
-
-        struct STagGroupDecimate : STagGroup {
-                float fraction;
-                STagGroupDecimate( const string& a, double c)
-                      : STagGroup (a, TInvertOption::no), fraction (c)
-                        {}
-        };
-        size_t process_decimate_tags( const list<STagGroupDecimate>&);
-
-        struct STagGroupNeuronParmSet : STagGroup {
-                string parm;
-                double value;
-                STagGroupNeuronParmSet( const string& a,
-                                        const string& c, double d,
-                                        STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
-                      : STagGroup (a, b),
-                        parm (c), value (d)
-                        {}
-        };
-        struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
-                string target;
-                STagGroupSynapseParmSet( const string& a,
-                                         const string& z, const string& c, double d,
-                                         STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
-                      : STagGroupNeuronParmSet (a, c, d, b), target (z)
-                        {}
-        };
-        size_t process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
-        size_t process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
-
-        struct STagGroupSource : STagGroup {
-                string parm;
-                C_BaseSource *source;
-                STagGroupSource( const string& a,
-                                 const string& c, C_BaseSource *d,
-                                 STagGroup::TInvertOption b = STagGroup::TInvertOption::no)  // b == false to revert to stock
-                      :  STagGroup (a, b), parm (c), source (d)
-                        {}
-        };
-        size_t process_paramset_source_tags( const list<STagGroupSource>&);
-
-        C_BaseSource*
-        source_by_id( const string& id) const
-                {
-                        for ( auto& S : _sources )
-                                if ( id == S->name() )
-                                        return S;
-                        return nullptr;
-                }
-        const list<C_BaseSource*>&
-        sources() const
-                {  return _sources;  }
-        void
-        add_source( C_BaseSource* s)
-                {
-                        _sources.push_back( s);
-                }
-        // no (straight) way to delete a source
-
-      // 5. Running
-        unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
-        double model_time() const  { return V[0]; }
-
-        double dt() const      { return _integrator->dt; }
-        double dt_min() const  { return _integrator->_dt_min; }
-        double dt_max() const  { return _integrator->_dt_max; }
-        double dt_cap() const  { return _integrator->_dt_cap; }
-        void set_dt(double v)  { _integrator->dt = v; }
-        void set_dt_min(double v)  { _integrator->_dt_min = v; }
-        void set_dt_max(double v)  { _integrator->_dt_max = v; }
-        void set_dt_cap(double v)  { _integrator->_dt_cap = v; }
-
-        unsigned long cycle()         const { return _cycle;          }
-        double model_discrete_time()  const { return _discrete_time;  }
-        double discrete_dt()          const { return _discrete_dt;    }
-
-      // 9. misc
-        gsl_rng *rng() const
-                {  return _rng;  }
-        double rng_sample() const
-                {
-                        return gsl_rng_uniform_pos( _rng);
-                }
-    private:
-        friend class C_BaseUnit;
-        friend class C_BaseNeuron;
-        friend class C_BaseSynapse;
-        friend class C_HostedNeuron;
-        friend class C_HostedConductanceBasedNeuron;
-        friend class C_HostedRateBasedNeuron;
-        friend class C_HostedSynapse;
-        friend class CNeuronMap;
-        friend class CSynapseMap;
-        friend class CSynapseMxAB_dd;
-        friend class SSpikeloggerService;
-
-        friend class CIntegrate_base;
-        friend class CIntegrateRK65;
-
-      // supporting functions
-        void register_listener( C_BaseUnit*);
-        void unregister_listener( C_BaseUnit*);
-        void register_spikelogger( C_BaseNeuron*);
-        void unregister_spikelogger( C_BaseNeuron*);
-        void register_mx_synapse( C_BaseSynapse*);
-        void unregister_mx_synapse( C_BaseSynapse*);
-
-        void register_unit_with_sources( C_BaseUnit*);
-        void unregister_unit_with_sources( C_BaseUnit*);
-        void _include_base_unit( C_BaseUnit*);
-
-        int _process_populations( xmlNode*);
-        int _process_population_instances( xmlNode*, const xmlChar*, const xmlChar*);
-
-        int _process_projections( xmlNode*);
-        int _process_projection_connections( xmlNode*, const xmlChar*, const xmlChar*,
-                                             const xmlChar *src_grp_prefix,
-                                             const xmlChar *tgt_grp_prefix);
-
-        void _setup_schedulers();
-        void coalesce_synapses();
-        void prepare_advance();
-        unsigned _do_advance_on_pure_hosted( double, double*)  __attribute__ ((hot));
-        unsigned _do_advance_on_pure_standalone( double, double*) __attribute__ ((hot));
-        unsigned _do_advance_on_pure_ddtbound( double, double*) __attribute__ ((hot));
-        unsigned _do_advance_on_mixed( double, double*) __attribute__ ((hot));
-
-        void make_listening_units_tell()
-                {
-                        for ( auto& U : listening_units )
-                                U -> tell();
-                }
-        void make_conscious_neurons_possibly_fire()
-                {
-                        for ( auto& U : conscious_neurons )
-                                U->possibly_fire();
-                }
-        void make_units_with_periodic_sources_apprise_from_sources()
-                {
-                        for ( auto& U : units_with_periodic_sources )
-                                U->apprise_from_sources();
-                }
-        void make_units_with_continuous_sources_apprise_from_sources()
-                {
-                        for ( auto& U : units_with_continuous_sources )
-                                U->apprise_from_sources();
-                }
-        void make_spikeloggers_sync_history()
-                {
-                        for ( auto& N : spikelogging_neurons )
-                                N->sync_spikelogging_history();
-                }
-
-        static double
-        model_time( vector<double> &x)
-                {
-                        return x[0];
-                }
-
-      // contents
-        list<C_BaseUnit*>
-                units; // all units together
-      // these have derivative(), are churned in _integrator->cycle()
-        list<C_HostedNeuron*>
-                hosted_neurons;
-        list<C_HostedSynapse*>
-                hosted_synapses;
-      // these need preadvance() and fixate()
-        list<C_StandaloneNeuron*>
-                standalone_neurons;
-        list<C_StandaloneSynapse*>
-                standalone_synapses;
-      // ... also these, but at discrete dt only
-      // (only the standalone map units currently)
-        list<C_StandaloneNeuron*>
-                ddtbound_neurons;
-        list<C_StandaloneSynapse*>
-                ddtbound_synapses;
-
-      // neurons that can possibly_fire() (various oscillators), and
-      // have no inputs, and hence not dependent on anything else
-        list<C_BaseNeuron*>
-                conscious_neurons;
-
-      // various lists to avoid traversing all of them in units:
-      // listeners, spikeloggers & readers
-        list<C_BaseUnit*>
-                listening_units;
-      // uses a meaningful do_spikelogging_or_whatever
-        list<C_BaseNeuron*>
-                spikelogging_neurons;
-      // `Multiplexing AB' synapses are treated very specially
-        list<C_BaseSynapse*>
-                multiplexing_synapses;
-
-      // those for which apprise_from_source( model_time()) will be called
-        list<C_BaseUnit*>
-                units_with_continuous_sources;
-      // same, but not every cycle
-        list<C_BaseUnit*>
-                units_with_periodic_sources;
-        list<double>
-                regular_periods;
-        list<unsigned>
-                regular_periods_last_checked;
-
-        unsigned long
-                _global_unit_id_reservoir;
-
-      // the essential mechanical parts: ----
-      // hosted unit variables
-        vector<double> V,        // contains catenated var vectors of all constituent neurons and synapses
-                       W;        // V and W alternate in the capacity of the main vector, so avoiding many a memcpy
-        size_t  _var_cnt;        // total # of variables (to be) allocated in V an W, plus one for model_time
-
-      // integrator interface
-        CIntegrate_base
-                *_integrator;
-
-        unsigned long
-                _cycle;
-        double  _discrete_time;
-        double  _discrete_dt;
-
-        list<C_BaseSource*>
-                _sources;
-
-        ofstream
-                *_dt_logger,
-                *_spike_logger;
-
-        bool    is_ready:1,
-                is_diskless:1,
-                have_ddtb_units:1;
-
-        unsigned short
-                _longest_label;
-
-        gsl_rng *_rng;
-
-        int verbose_threshold() const
-                {
-                        return options.verbosely;
-                }
-};
-
-
-
-
-
-inline void
-CIntegrateRK65::fixate()
-{
-        swap( model->V, model->W);
-}
-
-
-// various CUnit & CNeuron methods accessing CModel members
-// that we want to have inline
-
-inline double
-C_BaseUnit::model_time() const
-{
-        return M->model_time();
-}
-
-inline void
-C_BaseUnit::pause_listening()
-{
-        if ( !M )
-                throw "pause_listening() called on NULL model";
-        M->unregister_listener( this);
-}
-
-inline void
-C_BaseUnit::resume_listening()
-{
-        if ( !M )
-                throw "resume_listening() called on NULL model";
-        M->register_listener( this);
-}
-
-
-
-template <class T>
-void
-C_BaseUnit::attach_source( T *s, TSinkType t, unsigned short idx)
-{
-        _sources.push_back( SSourceInterface<T>( s, t, idx));
-        M->register_unit_with_sources(this);
-}
-
-
-
-
-
-inline SSpikeloggerService*
-C_BaseNeuron::
-enable_spikelogging_service( int s_mask)
-{
-        if ( !_spikelogger_agent )
-                _spikelogger_agent =
-                        new SSpikeloggerService( this, s_mask);
-        M->register_spikelogger( this);
-        return _spikelogger_agent;
-}
-inline SSpikeloggerService*
-C_BaseNeuron::
-enable_spikelogging_service( double sample_period,
-                             double sigma,
-                             double from, int s_mask)
-{
-        if ( !_spikelogger_agent )
-                _spikelogger_agent =
-                        new SSpikeloggerService( this, sample_period, sigma, from, s_mask);
-        M->register_spikelogger( this);
-        return _spikelogger_agent;
-}
-
-inline void
-C_BaseNeuron::
-disable_spikelogging_service()
-{
-        if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_PERSIST)) {
-                _spikelogger_agent->sync_history();
-                M->unregister_spikelogger( this);
-
-                delete _spikelogger_agent;
-                _spikelogger_agent = nullptr;
-        }
-}
-
-
-
-
-
-
-inline void
-C_HostedNeuron::
-reset_vars()
-{
-        if ( M && idx < M->_var_cnt )
-                memcpy( &M->V[idx],
-                        __CNUDT[_type].stock_var_values,
-                        __CNUDT[_type].vno * sizeof(double));
-}
-
-inline double&
-C_HostedNeuron::
-var_value( size_t v)
-{
-        return M->V[idx + v];
-}
-
-inline const double&
-C_HostedNeuron::
-get_var_value( size_t v) const
-{
-        return M->V[idx + v];
-}
-
-
-
-inline size_t
-C_HostedConductanceBasedNeuron::
-n_spikes_in_last_dt() const
-{
-        return E() >= M->options.spike_threshold;
-}
-
-inline size_t
-C_HostedRateBasedNeuron::
-n_spikes_in_last_dt() const
-{
-        return round(E() * M->dt() * M->rng_sample());
-}
-
-
-inline size_t
-C_StandaloneConductanceBasedNeuron::
-n_spikes_in_last_dt() const
-{
-        return E() >= M->options.spike_threshold;
-}
-
-inline size_t
-C_StandaloneRateBasedNeuron::
-n_spikes_in_last_dt() const
-{
-        return round(E() * M->dt() * M->rng_sample());
-}
-
-
-
-
-
-inline void
-C_HostedSynapse::
-reset_vars()
-{
-        if ( M && M->_var_cnt > idx )
-                memcpy( &M->V[idx],
-                        __CNUDT[_type].stock_var_values,
-                        __CNUDT[_type].vno * sizeof(double));
-}
-
-
-
-inline double&
-C_HostedSynapse::
-var_value( size_t v)
-{
-        return M->V[idx + v];
-}
-
-inline const double&
-C_HostedSynapse::
-get_var_value( size_t v) const
-{
-        return M->V[idx + v];
-}
-
-
-
-inline double
-C_HostedConductanceBasedNeuron::
-E() const
-{
-        return M->V[idx+0];
-}
-
-// F is computed on the fly, so far usually
-
-
-inline double
-C_HostedSynapse::
-S() const
-{
-        return M->V[idx+0];
-}
-
-
-
-
-inline void
-CSynapseMap::
-preadvance()
-{
-        V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_])
-                + (_source->n_spikes_in_last_dt() ? P[_delta_] : 0);
-}
-
-
-
-inline void
-CSynapseMxMap::
-preadvance()
-{
-        V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_]) + q() * P[_delta_];
-}
-
-}
-
-#endif
-
-// Local Variables:
-// Mode: c++
-// indent-tabs-mode: nil
-// tab-width: 8
-// c-basic-offset: 8
-// End:
diff --git a/upstream/src/libcnrun/model/.dirstamp b/upstream/src/libcnrun/model/.dirstamp
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/upstream/src/libcnrun/model/cycle.cc b/upstream/src/libcnrun/model/cycle.cc
new file mode 100644 (file)
index 0000000..5247bae
--- /dev/null
@@ -0,0 +1,561 @@
+/*
+ *       File name:  libcnrun/model/cycle.cc
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer@gmail.com>
+ * Initial version:  2008-08-02
+ *
+ *         Purpose:  CModel top cycle
+ *
+ *         License:  GPL-2+
+ */
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <ctime>
+#include <cstdlib>
+#include <iostream>
+
+#include "libstilton/lang.hh"
+#include "integrator/rk65.hh"
+#include "model.hh"
+
+
+using namespace std;
+
+
+/*--------------------------------------------------------------------------
+  Implementation of a 6-5 Runge-Kutta method with adaptive time step
+  mostly taken from the book "The numerical analysis of ordinary differential
+  equations - Runge-Kutta and general linear methods" by J.C. Butcher, Wiley,
+  Chichester, 1987 and a free adaptation to a 6 order Runge Kutta method
+  of an ODE system with additive white noise
+--------------------------------------------------------------------------*/
+
+inline namespace {
+
+double __Butchers_a[9][8] = {
+        { },
+        { 1./9 },
+        { .5/9,        .5/9 },
+        { 0.416666666666667e-1,        0., 0.125 },
+        { 1./6, 0., -0.5, 2./3 },
+        { 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
+        { -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
+        { 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
+        { 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
+};
+
+
+double __Butchers_b[9] = {
+        0.567119155354449e-1,
+        0.,
+        0.,
+        0.210909572355356,
+        0.141490384615385,
+        0.202051282051282,
+        0.253186813186813,
+        0.843679809736684e-1,
+        0.512820512820513e-1
+};
+} // inline namespace
+
+
+
+void
+cnrun::CIntegrateRK65::
+prepare()
+{
+        for ( unsigned short i = 0; i < 9; ++i ) {
+                Y[i].resize( model->_var_cnt);
+                F[i].resize( model->_var_cnt);
+        }
+        y5.resize( model->_var_cnt);
+
+        if ( model->n_standalone_units() > 0 )
+                if ( _dt_max > model->_discrete_dt ) {
+                        _dt_max = model->_discrete_dt;
+                        model->vp( 1, "CIntegrateRK65: Set dt_max to model->discrete_dt: %g\n", _dt_max);
+                }
+}
+
+
+void
+__attribute__ ((hot))
+cnrun::CIntegrateRK65::
+cycle()
+{
+      // omp stuff found inapplicable due to considerable overhead in sys time
+      // (thread creation)
+        unsigned int i, j, k;
+
+        double  aF;
+
+      // calculate iterative terms rk65_Y[__i] and rk65_F[__i] (to sixth order)
+        for ( i = 0; i < 9; ++i ) {
+//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) firstprivate(aF,j,i)
+                for ( k = 0; k < model->_var_cnt; ++k ) {
+                        aF = 0.0;
+                        for ( j = 0; j < i; ++j )
+                                aF += __Butchers_a[i][j] * F[j][k];
+                        Y[i][k] = model->V[k] + dt * aF;
+                }
+              // see to this vector's dt
+                F[i][0] = 1.;
+
+//#pragma omp consider...
+                for ( auto& N : model->hosted_neurons )
+                        N -> derivative( Y[i], F[i]);
+                for ( auto& S : model->hosted_synapses )
+                        S -> derivative( Y[i], F[i]);
+        }
+
+      // sum up Y[i] and F[i] to build 5th order scheme -> y5
+//#pragma omp parallel for private(aF,j)
+        for ( k = 0; k < model->_var_cnt; ++k ) {
+                aF = 0.0;
+                for ( j = 0; j < 8; ++j )
+                        aF += __Butchers_a[8][j] * F[j][k];
+                y5[k] = model->V[k] + dt * aF;
+        }
+
+      // sum up Y[i] and F[i] to build 6th order scheme -> W
+//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) private(aF,j)
+        for ( k = 0; k < model->_var_cnt; ++k ) {
+                aF = 0.0;
+                for ( j = 0; j < 9; ++j )
+                        aF += __Butchers_b[j] * F[j][k];
+                model->W[k] = model->V[k] + dt * aF;
+        }
+
+      // kinkiness in synapses causes dt to rocket
+        double  dtx = min( _dt_max, dt * _dt_cap);
+
+      // determine minimal necessary new dt to get error < eps based on the
+      // difference between results in y5 and W
+        double try_eps, delta, try_dt;
+      // exclude time (at index 0)
+//#pragma omp parallel for private(try_eps,delta,try_dtj)
+        for ( k = 1; k < model->_var_cnt; ++k ) {
+                try_eps = max( _eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
+                delta = abs( model->W[k] - y5[k]);
+                if ( delta > DBL_EPSILON * y5[k] ) {
+                        try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
+                        if ( try_dt < dtx )
+                                dtx = try_dt;
+                }
+        }
+      // make sure we don't grind to a halt
+        if ( dtx < _dt_min )
+                dtx = _dt_min;
+
+      // set the new step
+        dt = dtx;
+}
+
+
+
+
+
+
+
+
+// -------------- CModel::advance and dependents
+
+unsigned int
+cnrun::CModel::
+advance( const double dist, double * const cpu_time_used_p)
+{
+        if ( units.size() == 0 ) {
+                vp( 1, "Model is empty\n");
+                return 0;
+        }
+        if ( is_ready )
+                prepare_advance();
+
+        bool    have_hosted_units = !!n_hosted_units(),
+                have_standalone_units = !!n_standalone_units(),
+                have_ddtbound_units = !!n_ddtbound_units();
+
+        if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
+                return _do_advance_on_pure_hosted( dist, cpu_time_used_p);
+        if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
+                return _do_advance_on_pure_standalone( dist, cpu_time_used_p);
+        if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
+                return _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
+
+        unsigned int retval = _do_advance_on_mixed( dist, cpu_time_used_p);
+        return retval;
+}
+
+void
+__attribute__ ((hot))
+cnrun::CModel::
+_setup_schedulers()
+{
+        regular_periods.clear();
+        regular_periods_last_checked.clear();
+        if ( units_with_periodic_sources.size() ) { // determine period(s) at which to wake up reader update loop
+                for ( auto& U : units_with_periodic_sources )
+                        for ( auto& S : U -> _sources )
+                                regular_periods.push_back(
+                                        (reinterpret_cast<CSourcePeriodic*>(S.source)) -> period());
+                regular_periods.unique();
+                regular_periods.sort();
+                regular_periods_last_checked.resize( regular_periods.size());
+        }
+
+        if ( regular_periods.size() > 0 )
+                vp( 2, "%zd timepoint(s) in scheduler_update_periods: %s\n\n",
+                    regular_periods.size(),
+                    stilton::str::join( regular_periods).c_str());
+
+      // ensure all schedulers are effective at the beginning, too
+        for ( auto& U : units_with_periodic_sources )
+                U->apprise_from_sources();
+}
+
+
+void
+cnrun::CModel::
+prepare_advance()
+{
+        if ( options.log_dt && !_dt_logger )
+                _dt_logger = new ofstream( string(name + ".dt").c_str());
+        if ( options.log_spikers && !_spike_logger )
+                _spike_logger = new ofstream( string(name + ".spikes").c_str());
+
+        _setup_schedulers();
+
+        if ( !n_hosted_units() )
+                _integrator->dt = _discrete_dt;
+
+        is_ready = true;
+
+        vp( 5, stderr, "Model prepared\n");
+}
+
+
+
+// A note concerning the for_all_conscious_neurons loop:
+// these units have no next_time_E or the like, and their `fixate' step is
+// implicit here; also, conscious neurons fire irrespective of whatever happens
+// elsewhere in the model and, logically, have no inputs.
+
+// Per-step prologue shared by all _do_advance_* flavours: apprise units fed
+// by continuous sources; wake units with periodic sources whenever one of the
+// periods collected in _setup_schedulers() has elapsed; let conscious neurons
+// fire; refresh queues of multiplexing synapses that have a source.
+// NB: expands in the caller's scope and reads model_time(), regular_periods
+// and regular_periods_last_checked.
+#define _DO_ADVANCE_COMMON_INLOOP_BEGIN \
+        make_units_with_continuous_sources_apprise_from_sources();      \
+        {                                                               \
+                auto I = regular_periods.begin();                       \
+                auto Ic = regular_periods_last_checked.begin();         \
+                for ( ; I != regular_periods.end(); ++I, ++Ic )         \
+                        if ( unlikely (model_time() >= *I * (*Ic + 1)) ) { \
+                                (*Ic)++;                                \
+                                make_units_with_periodic_sources_apprise_from_sources(); \
+                        }                                               \
+        }                                                               \
+        make_conscious_neurons_possibly_fire();                         \
+                                                                        \
+        for ( auto& Y : multiplexing_synapses ) \
+                if ( Y->_source )                                        \
+                        Y->update_queue();
+
+
+// Per-step middle section: emit listener output (throttled to options.listen_dt
+// when it is positive, else every step), log the current dt if requested, then
+// run spike detection and per-spiker logging.
+// NB: expands in the caller's scope and requires locals `have_listeners',
+// `have_discrete_listen_dt' and `last_made_listen' to be defined there.
+#define _DO_ADVANCE_COMMON_INLOOP_MID \
+        if ( have_listeners ) {                                         \
+                if ( have_discrete_listen_dt ) {                        \
+                        if ( model_time() - last_made_listen >= options.listen_dt ) { \
+                                make_listening_units_tell();            \
+                                last_made_listen += options.listen_dt;  \
+                        }                                               \
+                } else                                                  \
+                        make_listening_units_tell();                    \
+        }                                                               \
+        if ( unlikely (options.log_dt) )                                \
+                (*_dt_logger) << model_time() << "\t" << dt() << endl;  \
+                                                                        \
+        for ( auto& N : spikelogging_neurons ) {                        \
+                N -> do_detect_spike_or_whatever();                     \
+                if ( !is_diskless &&                                    \
+                     N->n_spikes_in_last_dt() &&                        \
+                     options.log_spikers ) {                            \
+                        (*_spike_logger) << model_time() << "\t";       \
+                        if ( options.log_spikers_use_serial_id )        \
+                                (*_spike_logger) << N->_serial_id << endl; \
+                        else                                            \
+                                (*_spike_logger) << N->_label << endl;  \
+                }                                                       \
+        }
+
+
+// Per-step epilogue: bump the model-wide cycle counter and this run's step
+// counter; when verbose, redraw the progress indicator (percent and/or model
+// time) at most every ~2 s of CPU time.  A negative options.verbosely shifts
+// the output column right via the ANSI cursor-forward escape.
+// NB: requires caller locals `steps', `cpu_time_lastchecked', `time_started'
+// and `time_ending'.
+#define _DO_ADVANCE_COMMON_INLOOP_END \
+        ++_cycle;                                                       \
+        ++steps;                                                        \
+        if ( options.verbosely != 0 ) {                                 \
+                if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
+                        cpu_time_lastchecked = clock();                 \
+                        if ( options.display_progress_percent && !options.display_progress_time ) \
+                                fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*8 : 0, \
+                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+                        else if ( options.display_progress_time && !options.display_progress_percent ) \
+                                fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*16 : 0, \
+                                         model_time());                 \
+                        else if ( options.display_progress_percent && options.display_progress_time ) \
+                                fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
+                                         (options.verbosely < 0) ? -(options.verbosely+1)*24 : 0, \
+                                         model_time(),                  \
+                                         100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+                        fflush( stderr);                                \
+                }                                                       \
+        }
+
+
+// Run epilogue: flush spike history, report CPU seconds consumed via
+// *cpu_time_used_p (if non-null), clear the progress line, print a summary.
+// NOTE(review): the "avg us/cyc" and "ratio to CPU time" figures divide total
+// model_time() by total _cycle / this run's CPU time — i.e. they mix lifetime
+// totals with per-run cost; presumably intentional, but confirm.
+#define _DO_ADVANCE_COMMON_EPILOG \
+        make_spikeloggers_sync_history();                               \
+        cpu_time_ended = clock();                                        \
+        double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
+        if ( cpu_time_used_p )                                                \
+                *cpu_time_used_p = cpu_time_taken_seconds;                \
+        if ( options.verbosely > 0 || options.verbosely <= -1 ) {                        \
+                fprintf( stderr, "\r\033[K");                                \
+                fflush( stderr);                                        \
+        }                                                                \
+        vp( 0, "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:"   \
+            " avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n",     \
+            model_time(), dist, steps, cpu_time_taken_seconds,          \
+            model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
+
+
+
+
+
+// Advance the model by `dist' ms when every unit lives on the integration
+// vector: one _integrator->cycle() per step does all the work (and advances
+// model_time() implicitly).  Returns the number of steps taken; optionally
+// reports CPU seconds via *cpu_time_used_p.
+// NB: the locals below are consumed by the _DO_ADVANCE_COMMON_* macros.
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_hosted( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = (listening_units.size() > 0),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+                _integrator->cycle();
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                _integrator->fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+              // model_time is advanced implicitly in _integrator->cycle()
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+// Advance the model by `dist' ms when there are only standalone units:
+// preadvance/fixate each unit directly and step model time (V[0]) by the
+// fixed _discrete_dt, bypassing the integrator entirely.  Returns the number
+// of steps taken; optionally reports CPU seconds via *cpu_time_used_p.
+// NB: the locals below are consumed by the _DO_ADVANCE_COMMON_* macros.
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_standalone( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = !!listening_units.size(),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+              // service simple units w/out any vars on the integration vector V
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : standalone_synapses )
+                        Y -> preadvance();
+
+              // even in the case of n_hosted_{neurons,units} == 0, we would need _integrator->cycle() to advance V[0],
+              // which is our model_time(); which is kind of expensive, so here's a shortcut
+                V[0] += _discrete_dt;
+                // _discrete_time += _discrete_dt;  // not necessary
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : standalone_synapses )
+                        Y -> fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+
+
+
+
+// Advance the model by `dist' ms when there are only discrete-dt-bound
+// units: like the pure-standalone case, but both model time (V[0]) and
+// _discrete_time track the fixed _discrete_dt step.  Returns the number of
+// steps taken; optionally reports CPU seconds via *cpu_time_used_p.
+// NB: the locals below are consumed by the _DO_ADVANCE_COMMON_* macros.
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_pure_ddtbound( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_listeners = (listening_units.size() > 0),
+                have_discrete_listen_dt = (options.listen_dt > 0.);
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+              // lastly, service units only serviceable at discrete dt
+                for ( auto& N : ddtbound_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : ddtbound_synapses )
+                        Y -> preadvance();
+
+                V[0] += _discrete_dt;
+                _discrete_time += _discrete_dt;
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                for ( auto& N : ddtbound_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : ddtbound_synapses )
+                        Y -> fixate();
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+
+
+
+
+// Advance the model by `dist' ms in the general case, with any mix of
+// hosted, standalone and discrete-dt-bound units: run the integrator for
+// hosted units, preadvance/fixate standalone units every step, and service
+// ddt-bound units only when model time has caught up with _discrete_time.
+// Returns the number of steps taken; optionally reports CPU seconds via
+// *cpu_time_used_p.
+// NB: the locals below are consumed by the _DO_ADVANCE_COMMON_* macros.
+unsigned int
+__attribute__ ((hot))
+cnrun::CModel::
+_do_advance_on_mixed( const double dist, double * const cpu_time_used_p)
+{
+        bool    have_hosted_units = !!n_hosted_units(),
+                have_listeners = !!listening_units.size(),
+                have_discrete_listen_dt = (options.listen_dt > 0.),
+                need_fixate_ddtbound_units;
+
+        clock_t cpu_time_started = clock(),
+                cpu_time_ended,
+                cpu_time_lastchecked = cpu_time_started;
+
+        double  time_started = model_time(),
+                time_ending = time_started + dist,
+                last_made_listen = time_started;
+
+        unsigned long steps = 0;
+        do {
+                _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+                _integrator->cycle();
+
+              // service simple units w/out any vars on the integration vector V
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> preadvance();
+                for ( auto& Y : standalone_synapses )
+                        Y -> preadvance();
+
+              // lastly, service units only serviceable at discrete dt
+                if ( this->have_ddtb_units && model_time() >= _discrete_time ) {
+                        for ( auto& N : ddtbound_neurons )
+                                if ( !N->is_conscious() )
+                                        N -> preadvance();
+                        for ( auto& Y : ddtbound_synapses )
+                                Y -> preadvance();
+
+                        _discrete_time += _discrete_dt;
+                        need_fixate_ddtbound_units = true;
+                } else
+                        need_fixate_ddtbound_units = false;
+
+              // without hosted units the integrator does not advance V[0],
+              // so step model time manually
+                if ( !have_hosted_units )
+                        V[0] += _discrete_dt;
+
+                _DO_ADVANCE_COMMON_INLOOP_MID
+
+              // fixate
+                _integrator->fixate();
+
+                for ( auto& N : standalone_neurons )
+                        if ( !N->is_conscious() )
+                                N -> fixate();
+                for ( auto& Y : standalone_synapses )
+                        Y -> fixate();
+
+              // fixate ddt-bound units only on the steps that preadvanced them
+                if ( need_fixate_ddtbound_units ) {
+                        for ( auto& N : ddtbound_neurons )
+                                if ( !N->is_conscious() )
+                                        N -> fixate();
+                        for ( auto& Y : ddtbound_synapses )
+                                Y -> fixate();
+                }
+
+                _DO_ADVANCE_COMMON_INLOOP_END
+
+        } while ( model_time() < time_ending );
+
+        _DO_ADVANCE_COMMON_EPILOG
+
+        return steps;
+}
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model/forward-decls.hh b/upstream/src/libcnrun/model/forward-decls.hh
new file mode 100644 (file)
index 0000000..10664e0
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *       File name:  libcnrun/model/forward-decls.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer@gmail.com>
+ * Initial version:  2014-09-16
+ *
+ *         Purpose:  forward declarations
+ *
+ *         License:  GPL-2+
+ */
+
+#ifndef CNRUN_LIBCNRUN_MODEL_FORWARDDECLS_H_
+#define CNRUN_LIBCNRUN_MODEL_FORWARDDECLS_H_
+
+namespace cnrun {
+
+// full definition in libcnrun/model/model.hh
+class CModel;
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcnrun/model/model.hh b/upstream/src/libcnrun/model/model.hh
new file mode 100644 (file)
index 0000000..093a025
--- /dev/null
@@ -0,0 +1,715 @@
+/*
+ *       File name:  libcnrun/model/model.hh
+ *         Project:  cnrun
+ *          Author:  Andrei Zavada <johnhommer@gmail.com>
+ * Initial version:  2008-09-02
+ *
+ *         Purpose:  Main model class.
+ *
+ *         License:  GPL-2+
+ */
+
+/*--------------------------------------------------------------------------
+
+The wrapper class which takes lists of pointers to neurons and synapses
+which are networked to a neural system and assembles a common state
+vector and handles the derivatives. At the same time it serves the neurons
+and synapses their state at any given time and allows them to adjust their
+parameters.
+
+--------------------------------------------------------------------------*/
+
+
+#ifndef CNRUN_LIBCNRUN_MODEL_MODEL_H_
+#define CNRUN_LIBCNRUN_MODEL_MODEL_H_
+
+#if HAVE_CONFIG_H && !defined(VERSION)
+#  include "config.h"
+#endif
+
+#include <list>
+#include <vector>
+#include <string>
+
+#include "libxml/parser.h"
+#include "libxml/tree.h"
+
+#include "gsl/gsl_rng.h"
+
+#include "libstilton/misc.hh"
+#include "forward-decls.hh"
+#include "units/base-neuron.hh"
+#include "units/base-synapse.hh"
+#include "units/hosted-neurons.hh"
+#include "units/hosted-synapses.hh"
+#include "units/standalone-neurons.hh"
+#include "units/standalone-synapses.hh"
+#include "integrator/rk65.hh"
+
+
+using namespace std;
+
+namespace cnrun {
+
+struct SModelOptions {
+        bool    listen_1varonly:1,
+                listen_deferwrite:1,
+                listen_binary:1,
+                log_dt:1,
+                log_spikers:1,
+                log_spikers_use_serial_id:1,
+                log_sdf:1,
+                display_progress_percent:1,
+                display_progress_time:1;
+        int     precision;
+        double  spike_threshold,
+                spike_lapse,
+                listen_dt;
+        double  //discrete_dt,
+                integration_dt_max,
+                integration_dt_min,
+                integration_dt_cap;
+        double  sxf_start_delay,
+                sxf_period,
+                sdf_sigma;
+        int     verbosely;
+
+        SModelOptions ()
+              : listen_1varonly (true), listen_deferwrite (false), listen_binary (false),
+                log_dt (false),
+                log_spikers (false), log_spikers_use_serial_id (false),
+                log_sdf (false),
+                display_progress_percent (true),
+                display_progress_time (false),
+                precision (8),
+                spike_threshold (0.), spike_lapse (3.),
+                listen_dt(1.),
+                //discrete_dt(.5),
+                integration_dt_max (.5), integration_dt_min (1e-5), integration_dt_cap (5.),
+                sxf_start_delay (0.), sxf_period (0.), sdf_sigma (0.),
+                verbosely (1)
+                {}
+
+        SModelOptions (const SModelOptions& rv)
+                {
+                        memmove(this, &rv, sizeof(SModelOptions));
+                }
+};
+
+
+class CModel : public cnrun::stilton::C_verprintf {
+
+    public:
+      // ctor, dtor
+        CModel (const string& name, CIntegrate_base*, const SModelOptions&);
+        virtual ~CModel ();
+
+        string  name;
+
+        SModelOptions
+                options;
+
+      // Unit list and lookup
+        vector<C_BaseUnit*>
+        list_units() const
+                {  return move(vector<C_BaseUnit*> (units.begin(), units.end()));  }
+        vector<C_BaseUnit*>
+        list_units( const string& label) const;
+        C_BaseUnit    *unit_by_label( const string&) const;
+        C_BaseNeuron  *neuron_by_label( const string&) const;
+        C_BaseSynapse *synapse_by_label( const string&) const;
+        unsigned short longest_label() const
+                {  return _longest_label;  }
+
+      // Unit tally
+        size_t n_hosted_units() const
+                {  return hosted_neurons.size() + hosted_synapses.size();                }
+        size_t n_standalone_units() const
+                {  return standalone_neurons.size() + standalone_synapses.size();        }
+        size_t n_ddtbound_units() const
+                {  return ddtbound_neurons.size() + ddtbound_synapses.size();            }
+        size_t n_total_neurons() const
+                {
+                        return hosted_neurons.size()
+                                + standalone_neurons.size()
+                                + ddtbound_neurons.size();
+                }
+        size_t n_total_synapses() const
+                {
+                        return hosted_synapses.size()
+                                + standalone_synapses.size()
+                                + ddtbound_synapses.size();
+                }
+
+      // 0. Model composition
+
+      // There are two ways of adding units:
+      // - create units outside, then 'include' them in a model;
+      // - specify which unit you want, by type, and creating
+      //   them directly in the model ('add').
+
+        //enum class TIncludeOption { is_last, is_notlast, };  // defined in hosted-unit.hh
+        // if option == is_last, do allocations of hosted units' vars immediately
+        // otherwise defer until addition is done with option == is_notlast
+        // or the user calls finalize_additions
+        int include_unit( C_HostedNeuron*, TIncludeOption option = TIncludeOption::is_last);
+        int include_unit( C_HostedSynapse*, TIncludeOption option = TIncludeOption::is_last);
+        int include_unit( C_StandaloneNeuron*);
+        int include_unit( C_StandaloneSynapse*);
+
+        C_BaseNeuron*
+        add_neuron_species( TUnitType, const string& label,
+                            TIncludeOption = TIncludeOption::is_last,
+                            double x = 0., double y = 0., double z = 0.);
+        C_BaseNeuron*
+        add_neuron_species( const string& type, const string& label,
+                            TIncludeOption = TIncludeOption::is_last,
+                            double x = 0., double y = 0., double z = 0.);
+
+        enum class TSynapseCloningOption { yes, no, };
+        C_BaseSynapse*
+        add_synapse_species( const string& type, const string& src, const string& tgt,
+                             double g,
+                             TSynapseCloningOption = TSynapseCloningOption::yes,
+                             TIncludeOption = TIncludeOption::is_last);
+        void finalize_additions();
+
+        C_BaseSynapse*
+        add_synapse_species( TUnitType, C_BaseNeuron *src, C_BaseNeuron *tgt,
+                             double g,
+                             TSynapseCloningOption = TSynapseCloningOption::yes,
+                             TIncludeOption = TIncludeOption::is_last);
+
+        enum class TExcludeOption { with_delete, no_delete, };
+        C_BaseUnit*
+        exclude_unit( C_BaseUnit*, TExcludeOption option = TExcludeOption::no_delete);
+        // return nullptr if option == do_delete, the excluded unit otherwise, even if it was not owned
+        void delete_unit( C_BaseUnit* u)
+                {  exclude_unit( u, TExcludeOption::with_delete);  }
+
+      // 1. NeuroMl interface
+        enum class TNMLImportOption { merge, reset, };
+        enum TNMLIOResult {
+                ok = 0, nofile, noelem, badattr, badcelltype, biglabel, structerror,
+        };
+        int import_NetworkML( const string& fname, TNMLImportOption);
+        int import_NetworkML( xmlDoc*, const string& fname, TNMLImportOption);  // fname is merely informational here
+        int export_NetworkML( const string& fname);
+        int export_NetworkML( xmlDoc*);
+
+      // 2. Bulk operations
+        enum class TResetOption { with_params, no_params, };
+        void reset( TResetOption = TResetOption::no_params);
+        void reset_state_all_units();
+
+        void cull_deaf_synapses();  // those with gsyn == 0
+        void cull_blind_synapses(); // those with _source == nullptr
+
+      // 3. Informational
+        size_t vars() const  { return _var_cnt; }
+        void dump_metrics( FILE *strm = stdout) const;
+        void dump_state( FILE *strm = stdout) const;
+        void dump_units( FILE *strm = stdout) const;
+
+      // 4. Set unit parameters
+      // high-level functions to manipulate unit behaviour, set params, & connect sources
+        struct STagGroup {
+                string pattern;
+                enum class TInvertOption { yes, no, };
+                TInvertOption invert_option;
+                STagGroup( const string& a,
+                           TInvertOption b = STagGroup::TInvertOption::no)
+                      : pattern (a), invert_option (b)
+                        {}
+        };
+        struct STagGroupListener : STagGroup {
+                int bits;
+                STagGroupListener( const string& a,
+                                   int c = 0,
+                                   STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b), bits (c)
+                        {}
+        };
+        size_t process_listener_tags( const list<STagGroupListener>&);
+
+        struct STagGroupSpikelogger : STagGroup {
+                double period, sigma, from;
+                STagGroupSpikelogger( const string& a,
+                                      double c = 0., double d = 0., double e = 0.,  // defaults disable sdf computation
+                                      STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b), period (c), sigma (d), from (e)
+                        {}
+        };
+        size_t process_spikelogger_tags( const list<STagGroupSpikelogger>&);
+        size_t process_putout_tags( const list<STagGroup>&);
+
+        struct STagGroupDecimate : STagGroup {
+                float fraction;
+                STagGroupDecimate( const string& a, double c)
+                      : STagGroup (a, TInvertOption::no), fraction (c)
+                        {}
+        };
+        size_t process_decimate_tags( const list<STagGroupDecimate>&);
+
+        struct STagGroupNeuronParmSet : STagGroup {
+                string parm;
+                double value;
+                STagGroupNeuronParmSet( const string& a,
+                                        const string& c, double d,
+                                        STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroup (a, b),
+                        parm (c), value (d)
+                        {}
+        };
+        struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
+                string target;
+                STagGroupSynapseParmSet( const string& a,
+                                         const string& z, const string& c, double d,
+                                         STagGroup::TInvertOption b = STagGroup::TInvertOption::no)
+                      : STagGroupNeuronParmSet (a, c, d, b), target (z)
+                        {}
+        };
+        size_t process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
+        size_t process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
+
+        struct STagGroupSource : STagGroup {
+                string parm;
+                C_BaseSource *source;
+                STagGroupSource( const string& a,
+                                 const string& c, C_BaseSource *d,
+                                 STagGroup::TInvertOption b = STagGroup::TInvertOption::no)  // b == false to revert to stock
+                      :  STagGroup (a, b), parm (c), source (d)
+                        {}
+        };
+        size_t process_paramset_source_tags( const list<STagGroupSource>&);
+
+        C_BaseSource*
+        source_by_id( const string& id) const
+                {
+                        for ( auto& S : _sources )
+                                if ( id == S->name() )
+                                        return S;
+                        return nullptr;
+                }
+        const list<C_BaseSource*>&
+        sources() const
+                {  return _sources;  }
+        void
+        add_source( C_BaseSource* s)
+                {
+                        _sources.push_back( s);
+                }
+        // no (straight) way to delete a source
+
+      // 5. Running
+        unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
+        double model_time() const  { return V[0]; }
+
+        double dt() const      { return _integrator->dt; }
+        double dt_min() const  { return _integrator->_dt_min; }
+        double dt_max() const  { return _integrator->_dt_max; }
+        double dt_cap() const  { return _integrator->_dt_cap; }
+        void set_dt(double v)  { _integrator->dt = v; }
+        void set_dt_min(double v)  { _integrator->_dt_min = v; }
+        void set_dt_max(double v)  { _integrator->_dt_max = v; }
+        void set_dt_cap(double v)  { _integrator->_dt_cap = v; }
+
+        unsigned long cycle()         const { return _cycle;          }
+        double model_discrete_time()  const { return _discrete_time;  }
+        double discrete_dt()          const { return _discrete_dt;    }
+
+      // 9. misc
+        gsl_rng *rng() const
+                {  return _rng;  }
+        double rng_sample() const
+                {
+                        return gsl_rng_uniform_pos( _rng);
+                }
+    private:
+        friend class C_BaseUnit;
+        friend class C_BaseNeuron;
+        friend class C_BaseSynapse;
+        friend class C_HostedNeuron;
+        friend class C_HostedConductanceBasedNeuron;
+        friend class C_HostedRateBasedNeuron;
+        friend class C_HostedSynapse;
+        friend class CNeuronMap;
+        friend class CSynapseMap;
+        friend class CSynapseMxAB_dd;
+        friend class SSpikeloggerService;
+
+        friend class CIntegrate_base;
+        friend class CIntegrateRK65;
+
+      // supporting functions
+        void register_listener( C_BaseUnit*);
+        void unregister_listener( C_BaseUnit*);
+        void register_spikelogger( C_BaseNeuron*);
+        void unregister_spikelogger( C_BaseNeuron*);
+        void register_mx_synapse( C_BaseSynapse*);
+        void unregister_mx_synapse( C_BaseSynapse*);
+
+        void register_unit_with_sources( C_BaseUnit*);
+        void unregister_unit_with_sources( C_BaseUnit*);
+        void _include_base_unit( C_BaseUnit*);
+
+        int _process_populations( xmlNode*);
+        int _process_population_instances( xmlNode*, const&n