diff --git a/.clang-format.changes b/.clang-format.changes
index 232524f846..0091d2af2f 100644
--- a/.clang-format.changes
+++ b/.clang-format.changes
@@ -2,9 +2,9 @@ SortIncludes: false
 Standard: c++17
 StatementMacros: [MKDLL, MKDLLdec, MKDLLif, MKDLLvp, MKDLLvpf, PyObject_HEAD, declareActionCallback,
 declareAdjustStepper, declareArrowGlyph, declareFieldEditorCallback,
-declareFieldSEditorCallback, declareFileChooserCallback, declareIOCallback, declareList, declarePool,
+declareFieldSEditorCallback, declareFileChooserCallback, declareIOCallback, declareList,
 declarePtrList, declareRubberCallback, declareSelectionCallback, declareTable, declareTable2,
 implementActionCallback, implementAdjustStepper, implementArrowGlyph, implementFieldEditorCallback,
 implementFieldSEditorCallback, implementFileChooserCallback, implementIOCallback, implementList,
-implementPool, implementPtrList, implementRubberCallback, implementSelectionCallback, implementTable,
+implementPtrList, implementRubberCallback, implementSelectionCallback, implementTable,
 implementTable2, nrn_pragma_acc, nrn_pragma_omp, TBUF]
diff --git a/.gitignore b/.gitignore
index 41ce95342d..62594fc0bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,4 @@ virtualenv
 *.lo
 docs/_build
 docs/_generated
+.vscode
diff --git a/docs/hoc/simctrl/cvode.rst b/docs/hoc/simctrl/cvode.rst
index 41e4f03160..00940f6a26 100644
--- a/docs/hoc/simctrl/cvode.rst
+++ b/docs/hoc/simctrl/cvode.rst
@@ -224,6 +224,47 @@ CVode
 
 
 
+.. hoc:method:: CVode.free_event_queues
+
+
+    Syntax:
+        ``cvode.free_event_queues()``
+
+
+    Description:
+        This function takes care of clearing and freeing all the event queues allocated in NEURON.
+        More specifically, it frees the `TQItemPool`, `SelfEventPool` and `SelfQueue` members of
+        the `NetCvodeThreadData`.
+        This method should be called only after the end of the NEURON simulation, since calling it
+        clears all the event queues; it should only be used for freeing up memory.
+
+----
+
+
+
+.. hoc:method:: CVode.poolshrink
+
+
+    Syntax:
+        ``cvode.poolshrink()``
+
+        ``cvode.poolshrink(1)``
+
+
+    Description:
+        This function either prints or frees the `DoubleArrayPool`\ s and `DatumArrayPool`\ s
+        used by the mechanisms' data.
+        If it is called with the argument `1`, it deletes the pools whose number of items in
+        use is 0.
+        If it is called without arguments or with the argument `0`, it prints the current number
+        of items used and the number of items allocated for double arrays and Datum arrays.
+        This method should be called only after the end of the NEURON simulation, for freeing up
+        memory.
+
+----
+
+
+
 .. hoc:method:: CVode.rtol
 
 
diff --git a/docs/python/simctrl/cvode.rst b/docs/python/simctrl/cvode.rst
index 83540f13a0..67e24972b6 100755
--- a/docs/python/simctrl/cvode.rst
+++ b/docs/python/simctrl/cvode.rst
@@ -233,6 +233,47 @@ CVode
 
 
 
+.. method:: CVode.free_event_queues
+
+
+    Syntax:
+        ``cvode.free_event_queues()``
+
+
+    Description:
+        This function takes care of clearing and freeing all the event queues allocated in NEURON.
+        More specifically, it frees the `TQItemPool`, `SelfEventPool` and `SelfQueue` members of
+        the `NetCvodeThreadData`.
+        This method should be called only after the end of the NEURON simulation, since calling it
+        clears all the event queues; it should only be used for freeing up memory.
+
+----
+
+
+
+.. method:: CVode.poolshrink
+
+
+    Syntax:
+        ``cvode.poolshrink()``
+
+        ``cvode.poolshrink(1)``
+
+
+    Description:
+        This function either prints or frees the `DoubleArrayPool`\ s and `DatumArrayPool`\ s
+        used by the mechanisms' data.
+        If it is called with the argument `1`, it deletes the pools whose number of items in
+        use is 0.
+        If it is called without arguments or with the argument `0`, it prints the current number
+        of items used and the number of items allocated for double arrays and Datum arrays.
+        This method should be called only after the end of the NEURON simulation, for freeing up
+        memory.
+
+----
+
+
+
 .. method:: CVode.rtol
 
 
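A minimal usage sketch of the two CVode methods documented above, included here as an illustration only (it is not part of the patch); it assumes the model has already been built, simulated, and discarded, as in the updated test at the end of this diff:

    from neuron import h

    # ... build a model, run the simulation, then drop all references to the model ...

    cv = h.CVode()
    cv.poolshrink()          # no argument (or 0): report items used/allocated in the
                             # double/Datum array pools of the mechanisms' data
    cv.poolshrink(1)         # argument 1: delete pools whose used-item count is 0
    cv.free_event_queues()   # free the TQItemPool, SelfEventPool and SelfQueue storage
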
diff --git a/src/nrncvode/cvodeobj.cpp b/src/nrncvode/cvodeobj.cpp
index e2ed4bf0ba..6aff004248 100644
--- a/src/nrncvode/cvodeobj.cpp
+++ b/src/nrncvode/cvodeobj.cpp
@@ -17,6 +17,7 @@ extern int hoc_return_type_code;
 #include "netcvode.h"
 #include "membfunc.h"
 #include "nrn_ansi.h"
+#include "nrncvode.h"
 #include "nrndaspk.h"
 #include "nrniv_mf.h"
 #include "tqueue.h"
@@ -587,6 +588,11 @@ static double poolshrink(void*) {
     return double(i);
 }
 
+static double free_event_queues(void*) {
+    free_event_queues();
+    return 0;
+}
+
 static Member_func members[] = {{"solve", solve},
                                 {"atol", nrn_atol},
                                 {"rtol", rtol},
@@ -636,6 +642,7 @@ static Member_func members[] = {{"solve", solve},
                                 {"extra_scatter_gather_remove", extra_scatter_gather_remove},
                                 {"use_fast_imem", use_fast_imem},
                                 {"poolshrink", poolshrink},
+                                {"free_event_queues", free_event_queues},
                                 {nullptr, nullptr}};
 
 static Member_ret_obj_func omembers[] = {{"netconlist", netconlist}, {nullptr, nullptr}};
diff --git a/src/nrncvode/cvodestb.cpp b/src/nrncvode/cvodestb.cpp
index de7625bd5f..63e5e0d111 100644
--- a/src/nrncvode/cvodestb.cpp
+++ b/src/nrncvode/cvodestb.cpp
@@ -58,6 +58,12 @@ void clear_event_queue() {
     }
 }
 
+void free_event_queues() {
+    if (net_cvode_instance) {
+        net_cvode_instance->free_event_pools();
+    }
+}
+
 void init_net_events() {
     if (net_cvode_instance) {
         net_cvode_instance->init_events();
diff --git a/src/nrncvode/hocevent.cpp b/src/nrncvode/hocevent.cpp
index 4cb2aa571c..49886c671e 100644
--- a/src/nrncvode/hocevent.cpp
+++ b/src/nrncvode/hocevent.cpp
@@ -4,8 +4,7 @@
 #include
 #include
 
-declarePool(HocEventPool, HocEvent)
-implementPool(HocEventPool, HocEvent)
+using HocEventPool = MutexPool<HocEvent>;
 HocEventPool* HocEvent::hepool_;
 
 HocEvent::HocEvent() {
diff --git a/src/nrncvode/netcon.h b/src/nrncvode/netcon.h
index 22e7838d6d..c317b619f9 100644
--- a/src/nrncvode/netcon.h
+++ b/src/nrncvode/netcon.h
@@ -9,6 +9,7 @@
 #include "htlist.h"
 #include "nrnmpi.h"
 #include "nrnneosm.h"
+#include "pool.h"
 
 #include
 
@@ -29,7 +30,8 @@ class TQueue;
 class TQItem;
 struct NrnThread;
 class NetCvode;
-class HocEventPool;
+class HocEvent;
+using HocEventPool = MutexPool<HocEvent>;
 class HocCommand;
 struct STETransition;
 class IvocVect;
diff --git a/src/nrncvode/netcvode.cpp b/src/nrncvode/netcvode.cpp
index 461e100bc1..49edb90c77 100644
--- a/src/nrncvode/netcvode.cpp
+++ b/src/nrncvode/netcvode.cpp
@@ -429,8 +429,7 @@ struct InterThreadEvent {
 };
 
 typedef std::vector WatchList;
-declarePool(SelfEventPool, SelfEvent)
-implementPool(SelfEventPool, SelfEvent)
+using SelfEventPool = MutexPool<SelfEvent>;
 typedef std::vector TQList;
 
 // allows marshalling of all items in the event queue that need to be
@@ -2918,6 +2917,27 @@ void NetCvode::clear_events() {
     }
 }
 
+// Frees the memory allocated for the SelfEvent pool and the TQItem pool after clearing them
+void NetCvode::free_event_pools() {
+    clear_events();
+    for (int i = 0; i < nrn_nthread; ++i) {
+        NetCvodeThreadData& d = p[i];
+        if (d.sepool_) {
+            delete d.sepool_;
+        }
+        if (d.selfqueue_) {
+            delete d.selfqueue_;
+        }
+        if (d.tqe_) {
+            delete d.tqe_;
+        }
+        if (d.tpool_) {
+            d.tpool_->free_all();
+            delete d.tpool_;
+        }
+    }
+}
+
 void NetCvode::init_events() {
     hoc_Item* q;
     int i, j;
diff --git a/src/nrncvode/netcvode.h b/src/nrncvode/netcvode.h
index abbd741406..33cf4d82ff 100644
--- a/src/nrncvode/netcvode.h
+++ b/src/nrncvode/netcvode.h
@@ -5,6 +5,7 @@
 
 #include "mymath.h"
 
+#include "cvodeobj.h"
 #include "tqueue.h"
 
 #include
 
@@ -17,9 +18,8 @@ class HocDataPaths;
 using PreSynTable = std::unordered_map;
 class NetCon;
 class DiscreteEvent;
-class TQItemPool;
-class SelfEventPool;
 class SelfEvent;
+using SelfEventPool = MutexPool<SelfEvent>;
 struct hoc_Item;
 class PlayRecord;
 class PlayRecList;
@@ -113,6 +113,7 @@ class NetCvode {
     void deliver_events(double til, NrnThread*);  // for initialization events
     void solver_prepare();
     void clear_events();
+    void free_event_pools();
     void init_events();
     void print_event_queue();
    void event_queue_info();
diff --git a/src/nrncvode/pool.h b/src/nrncvode/pool.h
index c1276b1dcb..5b2f7f5cae 100644
--- a/src/nrncvode/pool.h
+++ b/src/nrncvode/pool.h
@@ -13,145 +13,144 @@
 
 #include
 
-#define declarePool(Pool, T) \
-    class Pool { \
-      public: \
-        Pool(long count, int mkmut = 0); \
-        ~Pool(); \
-        T* alloc(); \
-        void hpfree(T*); \
-        int maxget() { \
-            return maxget_; \
-        } \
-        void free_all(); \
- \
-      private: \
-        void grow(); \
- \
-      private: \
-        T** items_; \
-        T* pool_; \
-        long pool_size_; \
-        long count_; \
-        long get_; \
-        long put_; \
-        long nget_; \
-        long maxget_; \
-        Pool* chain_; \
-        MUTDEC \
-    };
+#include
+template <typename T>
+class MutexPool {
+  public:
+    MutexPool(long count, int mkmut = 0);
+    ~MutexPool();
+    T* alloc();
+    void hpfree(T*);
+    int maxget() {
+        return maxget_;
+    }
+    void free_all();
+
+  private:
+    void grow();
+    T** items_{};
+    T* pool_{};
+    long pool_size_{};
+    long count_{};
+    long get_{};
+    long put_{};
+    long nget_{};
+    long maxget_{};
+    MutexPool* chain_{};
+    MUTDEC
+};
+
+template <typename T>
+MutexPool<T>::MutexPool(long count, int mkmut) {
+    count_ = count;
+    pool_ = new T[count_];
+    pool_size_ = count;
+    items_ = new T*[count_];
+    {
+        for (long i = 0; i < count_; ++i)
+            items_[i] = pool_ + i;
+    }
+    MUTCONSTRUCT(mkmut)
+}
+
+template <typename T>
+void MutexPool<T>::grow() {
+    assert(get_ == put_);
+    MutexPool* p = new MutexPool(count_);
+    p->chain_ = chain_;
+    chain_ = p;
+    long newcnt = 2 * count_;
+    T** itms = new T*[newcnt];
+    long i, j;
+    put_ += count_;
+    {
+        for (i = 0; i < get_; ++i) {
+            itms[i] = items_[i];
+        }
+    }
+    {
+        for (i = get_, j = 0; j < count_; ++i, ++j) {
+            itms[i] = p->items_[j];
+        }
+    }
+    {
+        for (i = put_, j = get_; j < count_; ++i, ++j) {
+            itms[i] = items_[j];
+        }
+    }
+    delete[] items_;
+    delete[] p->items_;
+    p->items_ = 0;
+    items_ = itms;
+    count_ = newcnt;
+}
+
+template <typename T>
+MutexPool<T>::~MutexPool() {
+    {
+        if (chain_) {
+            delete chain_;
+        }
+    }
+    delete[] pool_;
+    {
+        if (items_) {
+            delete[] items_;
+        }
+    }
+    MUTDESTRUCT
+}
+
+template <typename T>
+T* MutexPool<T>::alloc() {
+    MUTLOCK {
+        if (nget_ >= count_) {
+            grow();
+        }
+    }
+    T* item = items_[get_];
+    get_ = (get_ + 1) % count_;
+    ++nget_;
+    {
+        if (nget_ > maxget_) {
+            maxget_ = nget_;
+        }
+    }
+    MUTUNLOCK
+    return item;
+}
+
+template <typename T>
+void MutexPool<T>::hpfree(T* item) {
+    MUTLOCK
+    assert(nget_ > 0);
+    items_[put_] = item;
+    put_ = (put_ + 1) % count_;
+    --nget_;
+    MUTUNLOCK
+}
-#define implementPool(Pool, T) \
-    Pool::Pool(long count, int mkmut) { \
-        count_ = count; \
-        pool_ = new T[count_]; \
-        pool_size_ = count; \
-        items_ = new T*[count_]; \
-        { \
-            for (long i = 0; i < count_; ++i) \
-                items_[i] = pool_ + i; \
-        } \
-        get_ = 0; \
-        put_ = 0; \
-        nget_ = 0; \
-        maxget_ = 0; \
-        chain_ = 0; \
-        MUTCONSTRUCT(mkmut) \
-    } \
- \
-    void Pool::grow() { \
-        assert(get_ == put_); \
-        Pool* p = new Pool(count_); \
-        p->chain_ = chain_; \
-        chain_ = p; \
-        long newcnt = 2 * count_; \
-        T** itms = new T*[newcnt]; \
-        long i, j; \
-        put_ += count_; \
-        { \
-            for (i = 0; i < get_; ++i) { \
-                itms[i] = items_[i]; \
-            } \
-        } \
-        { \
-            for (i = get_, j = 0; j < count_; ++i, ++j) { \
-                itms[i] = p->items_[j]; \
-            } \
-        } \
-        { \
-            for (i = put_, j = get_; j < count_; ++i, ++j) { \
-                itms[i] = items_[j]; \
-            } \
-        } \
-        delete[] items_; \
-        delete[] p->items_; \
-        p->items_ = 0; \
-        items_ = itms; \
-        count_ = newcnt; \
-    } \
- \
-    Pool::~Pool() { \
-        { \
-            if (chain_) { \
-                delete chain_; \
-            } \
-        } \
-        delete[] pool_; \
-        { \
-            if (items_) { \
-                delete[] items_; \
-            } \
-        } \
-        MUTDESTRUCT \
-    } \
- \
-    T* Pool::alloc() { \
-        MUTLOCK { \
-            if (nget_ >= count_) { \
-                grow(); \
-            } \
-        } \
-        T* item = items_[get_]; \
-        get_ = (get_ + 1) % count_; \
-        ++nget_; \
-        { \
-            if (nget_ > maxget_) { \
-                maxget_ = nget_; \
-            } \
-        } \
-        MUTUNLOCK \
-        return item; \
-    } \
- \
-    void Pool::hpfree(T* item) { \
-        MUTLOCK \
-        assert(nget_ > 0); \
-        items_[put_] = item; \
-        put_ = (put_ + 1) % count_; \
-        --nget_; \
-        MUTUNLOCK \
-    } \
- \
-    void Pool::free_all() { \
-        MUTLOCK \
-        Pool* pp; \
-        long i; \
-        nget_ = 0; \
-        get_ = 0; \
-        put_ = 0; \
-        { \
-            for (pp = this; pp; pp = pp->chain_) { \
-                for (i = 0; i < pp->pool_size_; ++i) { \
-                    items_[put_++] = pp->pool_ + i; \
-                    pp->pool_[i].clear(); \
-                } \
-            } \
-        } \
-        assert(put_ == count_); \
-        put_ = 0; \
-        MUTUNLOCK \
-    }
+template <typename T>
+void MutexPool<T>::free_all() {
+    MUTLOCK
+    MutexPool* pp;
+    long i;
+    nget_ = 0;
+    get_ = 0;
+    put_ = 0;
+    {
+        for (pp = this; pp; pp = pp->chain_) {
+            for (i = 0; i < pp->pool_size_; ++i) {
+                items_[put_++] = pp->pool_ + i;
+                pp->pool_[i].clear();
+            }
+        }
+    }
+    assert(put_ == count_);
+    put_ = 0;
+    MUTUNLOCK
+}
 
 #endif
diff --git a/src/nrncvode/sptbinq.h b/src/nrncvode/sptbinq.h
index a2bc23c011..c9cbf8cc84 100644
--- a/src/nrncvode/sptbinq.h
+++ b/src/nrncvode/sptbinq.h
@@ -16,7 +16,6 @@
 
 #define COLLECT_TQueue_STATISTICS 1
 template <typename T> struct SPTREE;
-class TQItemPool;
 
 class TQItem {
   public:
diff --git a/src/nrncvode/tqueue.cpp b/src/nrncvode/tqueue.cpp
index f764635885..9d6b38f87b 100644
--- a/src/nrncvode/tqueue.cpp
+++ b/src/nrncvode/tqueue.cpp
@@ -104,7 +104,6 @@ void TQueue_reg() {
 
 //----------------
 
-implementPool(TQItemPool, TQItem)
 #if BBTQ == 0
 #include
 #endif
diff --git a/src/nrncvode/tqueue.h b/src/nrncvode/tqueue.h
index 5c9eee87e8..529aaff857 100644
--- a/src/nrncvode/tqueue.h
+++ b/src/nrncvode/tqueue.h
@@ -6,7 +6,7 @@
 #include
 
 class TQItem;
-declarePool(TQItemPool, TQItem)
+using TQItemPool = MutexPool<TQItem>;
 
 // 0 use bbtqueue, 1 use rbtqueue, 2 use sptqueue, 3 use sptfifoq
 #define BBTQ 5
diff --git a/src/nrnoc/nrncvode.h b/src/nrnoc/nrncvode.h
index 27b40076ad..ec77bca31b 100644
--- a/src/nrnoc/nrncvode.h
+++ b/src/nrnoc/nrncvode.h
@@ -7,6 +7,7 @@ extern void nrncvode_set_t(double);
 extern void deliver_net_events(NrnThread*);
 extern void nrn_deliver_events(NrnThread*);
 void clear_event_queue();
+void free_event_queues();
 extern void init_net_events();
 extern void nrn_record_init();
 extern void nrn_play_init();
diff --git a/test/coreneuron/test_pointer.py b/test/coreneuron/test_pointer.py
index f79336f815..0c8da83d4a 100644
--- a/test/coreneuron/test_pointer.py
+++ b/test/coreneuron/test_pointer.py
@@ -218,6 +218,30 @@ def run(tstop):
     del m
 
 
+def run_coreneuron_offline_checkpoint_restore(spikes_std):
+    # standard to compare with checkpoint series
+    tpnts = [5.0, 10.0]
+    for perm in [0, 1]:
+        print("\n\ncell_permute ", perm)
+        common = "-d coredat --voltage 1000 --verbose 0 --cell-permute %d" % (perm,)
+        # standard full run
+        runcn(common + " --tstop %g" % float(tpnts[-1]) + " -o coredat")
+        # sequence of checkpoints
+        for i, tpnt in enumerate(tpnts):
+            tend = tpnt
+            restore = " --restore coredat/chkpnt%d" % (i,) if i > 0 else ""
+            checkpoint = " --checkpoint coredat/chkpnt%d" % (i + 1,)
+            outpath = " -o coredat/chkpnt%d" % (i + 1,)
+            runcn(
+                common + " --tstop %g" % (float(tend),) + outpath + restore + checkpoint
+            )
+
+        # compare spikes
+        cmp_spks(
+            spikes_std, "coredat", ["coredat/chkpnt%d" % (i,) for i in range(1, 3)]
+        )
+
+
 def test_checkpoint():
     if pc.nhost() > 1:
         return
@@ -269,32 +293,22 @@ def run(tstop):
     assert spikes_std == spikes
     coreneuron.enable = False
 
-    # standard to compare with checkpoint series
-    tpnts = [5.0, 10.0]
-    for perm in [0, 1]:
-        print("\n\ncell_permute ", perm)
-        common = "-d coredat --voltage 1000 --verbose 0 --cell-permute %d" % (perm,)
-        # standard full run
-        runcn(common + " --tstop %g" % float(tpnts[-1]) + " -o coredat")
-        # sequence of checkpoints
-        for i, tpnt in enumerate(tpnts):
-            tend = tpnt
-            restore = " --restore coredat/chkpnt%d" % (i,) if i > 0 else ""
-            checkpoint = " --checkpoint coredat/chkpnt%d" % (i + 1,)
-            outpath = " -o coredat/chkpnt%d" % (i + 1,)
-            runcn(
-                common + " --tstop %g" % (float(tend),) + outpath + restore + checkpoint
-            )
-
-        # compare spikes
-        cmp_spks(
-            spikes_std, "coredat", ["coredat/chkpnt%d" % (i,) for i in range(1, 3)]
-        )
-
+    # Delete the model before launching the offline CoreNEURON simulation
     m._callback_setup = None
     pc.gid_clear()
     del m
 
+    # Shrink the memory pools of the mechanisms' data
+    h.CVode().poolshrink(1)
+    # Free the event queues
+    h.CVode().free_event_queues()
+    # After clearing the above data structures it is no longer possible to continue the NEURON
+    # simulation or to edit the model
+
+    # Execute the CoreNEURON checkpoint-restore simulation in offline mode using the `coredat`
+    # dumped to disk above
+    run_coreneuron_offline_checkpoint_restore(spikes_std)
+
 
 def cmp_spks(spikes, dir, chkpntdirs):
     # sorted nrn standard spikes into dir/out.spk
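A condensed sketch of the teardown ordering exercised by the updated test, for readers who want to reuse it outside this test; the model construction step is hypothetical, and everything else mirrors the calls made in test_checkpoint:

    from neuron import h

    pc = h.ParallelContext()
    # model = build_model()   # hypothetical helper that creates sections, NetCons, callbacks
    # ... run the simulation and save any results that are still needed ...

    # 1. Tear the model down first: clear gids and delete the Python references.
    pc.gid_clear()
    # del model

    # 2. Only then reclaim NEURON's internal memory; after these calls the process
    #    can no longer run another NEURON simulation or edit a model.
    cv = h.CVode()
    cv.poolshrink(1)
    cv.free_event_queues()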