diff --git a/neural_modelling/Makefile b/neural_modelling/Makefile
index c1acc7060d..c216189a29 100644
--- a/neural_modelling/Makefile
+++ b/neural_modelling/Makefile
@@ -18,7 +18,9 @@ BUILDS = synapse_expander \
spike_source/poisson \
delay_extension \
robot_motor_control \
- neuron
+ neuron_only \
+ synapse_only \
+ neuron
DIRS = $(BUILDS:%=makefiles/%)
diff --git a/neural_modelling/makefiles/neuron_only/IF_cond_exp_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_cond_exp_neuron/Makefile
new file mode 100644
index 0000000000..60b21880f5
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_cond_exp_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_cond_exp_stoc_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_cond_exp_stoc_neuron/Makefile
new file mode 100644
index 0000000000..48a006bca2
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_cond_exp_stoc_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_maass_stochastic.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_alpha_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_alpha_neuron/Makefile
new file mode 100644
index 0000000000..4aa92c8adc
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_alpha_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_alpha_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_delta_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_delta_neuron/Makefile
new file mode 100644
index 0000000000..e9992fe3f1
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_delta_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_delta.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_delta_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_exp_ca2_adaptive_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_exp_ca2_adaptive_neuron/Makefile
new file mode 100644
index 0000000000..4592dd9360
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_exp_ca2_adaptive_neuron/Makefile
@@ -0,0 +1,26 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+ADDITIONAL_INPUT_H = $(NEURON_DIR)/neuron/additional_inputs/additional_input_ca2_adaptive_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_exp_dual_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_exp_dual_neuron/Makefile
new file mode 100644
index 0000000000..b05b37e82f
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_exp_dual_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_dual_excitatory_exponential_impl.h
+
+include ../neuron_build.mk
\ No newline at end of file
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_exp_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_exp_neuron/Makefile
new file mode 100644
index 0000000000..6083713b17
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_exp_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IF_curr_exp_sEMD_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IF_curr_exp_sEMD_neuron/Makefile
new file mode 100644
index 0000000000..932682fe52
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IF_curr_exp_sEMD_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_semd_impl.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/IZK_cond_exp_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IZK_cond_exp_neuron/Makefile
new file mode 100644
index 0000000000..5ec6863805
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IZK_cond_exp_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_izh_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_izh_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+
+include ../neuron_build.mk
\ No newline at end of file
diff --git a/neural_modelling/makefiles/neuron_only/IZK_curr_exp_neuron/Makefile b/neural_modelling/makefiles/neuron_only/IZK_curr_exp_neuron/Makefile
new file mode 100644
index 0000000000..d2244a4ef8
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/IZK_curr_exp_neuron/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_izh_impl.c
+NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_izh_impl.h
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
+THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
+SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h
+
+include ../neuron_build.mk
\ No newline at end of file
diff --git a/neural_modelling/makefiles/neuron_only/Makefile b/neural_modelling/makefiles/neuron_only/Makefile
new file mode 100644
index 0000000000..6c6a8d1860
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/Makefile
@@ -0,0 +1,32 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+MODELS = IF_curr_exp_neuron\
+ IZK_curr_exp_neuron\
+ IF_cond_exp_neuron\
+ IZK_cond_exp_neuron\
+ IF_curr_exp_ca2_adaptive_neuron\
+ IF_curr_exp_dual_neuron\
+ IF_curr_exp_sEMD_neuron\
+ IF_curr_delta_neuron\
+ IF_curr_alpha_neuron\
+ IF_cond_exp_stoc_neuron\
+ external_device_lif_control_neuron
+
+all:
+ for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done
+
+clean:
+ for d in $(MODELS); do $(MAKE) -C $$d clean || exit $$?; done
diff --git a/neural_modelling/makefiles/neuron_only/external_device_lif_control_neuron/Makefile b/neural_modelling/makefiles/neuron_only/external_device_lif_control_neuron/Makefile
new file mode 100644
index 0000000000..a30124d5e9
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/external_device_lif_control_neuron/Makefile
@@ -0,0 +1,22 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = external_device_lif_control_neuron
+
+OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
+INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h
+NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_external_devices.h
+
+include ../neuron_build.mk
diff --git a/neural_modelling/makefiles/neuron_only/neuron_build.mk b/neural_modelling/makefiles/neuron_only/neuron_build.mk
new file mode 100644
index 0000000000..f8b4c7b068
--- /dev/null
+++ b/neural_modelling/makefiles/neuron_only/neuron_build.mk
@@ -0,0 +1,167 @@
+# See Notes in sPyNNaker/neural_modelling/CHANGES_April_2018
+
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+# If SPINN_DIRS is not defined, this is an error!
+ifndef SPINN_DIRS
+ $(error SPINN_DIRS is not set. Please define SPINN_DIRS (possibly by running "source setup" in the spinnaker package folder))
+endif
+
+# If NEURAL_MODELLING_DIRS is not defined, this is an error!
+ifndef NEURAL_MODELLING_DIRS
+ $(error NEURAL_MODELLING_DIRS is not set. Please define NEURAL_MODELLING_DIRS (possibly by running "source setup" in the sPyNNaker folder))
+endif
+#Check NEURAL_MODELLING_DIRS
+MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+CHECK_PATH := $(NEURAL_MODELLING_DIRS)/makefiles/neuron_only/neuron_build.mk
+ifneq ($(CHECK_PATH), $(MAKEFILE_PATH))
+ $(error Please check NEURAL_MODELLING_DIRS as based on that this file is at $(CHECK_PATH) when it is actually at $(MAKEFILE_PATH))
+endif
+
+# Set logging levels
+ifeq ($(SPYNNAKER_DEBUG), DEBUG)
+ NEURON_DEBUG = LOG_DEBUG
+endif
+
+ifndef NEURON_DEBUG
+ NEURON_DEBUG = LOG_INFO
+endif
+
+# Add source directory
+
+# Define the directories
+# Path flag to replace with the modified dir (abspath drops the final /)
+NEURON_DIR := $(abspath $(NEURAL_MODELLING_DIRS)/src)
+MODIFIED_DIR :=$(dir $(abspath $(NEURON_DIR)))modified_src/
+SOURCE_DIRS += $(NEURON_DIR)
+
+# Define a rule to find the source directory of the given file.
+# This attempts to find each of SOURCE_DIRS within the given file name; the
+# first one that matches is then returned. If none match, an empty string
+# will be returned.
+define get_source_dir#(file)
+$(firstword $(strip $(foreach d, $(sort $(SOURCE_DIRS)), $(findstring $(d), $(1)))))
+endef
+
+# Define rule to strip any SOURCE_DIRS from source_file to allow use via local.mk.
+# If no match is found, the value is returned untouched
+# (though this will probably fail later).
+define strip_source_dirs#(source_file)
+$(or $(patsubst $(call get_source_dir, $(1))/%,%,$(1)), $(1))
+endef
+
+# Define a rule to replace any SOURCE_DIRS from header_file with the modified_src folder.
+define replace_source_dirs#(header_file)
+$(patsubst $(call get_source_dir, $(1))%, $(dir $(call get_source_dir, $(1)))modified_src%, $(1))
+endef
+
+# Need to build each neuron seperately or complier gets confused
+# BUILD_DIR and APP_OUTPUT_DIR end with a / for historictical/ shared reasons
+ifndef BUILD_DIR
+ BUILD_DIR := $(NEURAL_MODELLING_DIRS)/builds/$(APP)/
+endif
+ifndef APP_OUTPUT_DIR
+ APP_OUTPUT_DIR := $(NEURAL_MODELLING_DIRS)/../spynnaker/pyNN/model_binaries
+endif
+
+# Check if the neuron implementation is the default one
+ifndef NEURON_IMPL_H
+ $(error NEURON_IMPL_H is not set. Please select a neuron implementation)
+else
+ NEURON_IMPL := $(call strip_source_dirs,$(NEURON_IMPL_H))
+ NEURON_IMPL_H := $(call replace_source_dirs,$(NEURON_IMPL_H))
+ NEURON_IMPL_STANDARD := neuron/implementations/neuron_impl_standard.h
+ NEURON_INCLUDES := -include $(NEURON_IMPL_H)
+ ifeq ($(NEURON_IMPL), $(NEURON_IMPL_STANDARD))
+
+ # Check required inputs and point them to modified sources
+ ifndef ADDITIONAL_INPUT_H
+ ADDITIONAL_INPUT_H = $(MODIFIED_DIR)neuron/additional_inputs/additional_input_none_impl.h
+ else
+ ADDITIONAL_INPUT_H := $(call replace_source_dirs,$(ADDITIONAL_INPUT_H))
+ endif
+
+ ifndef NEURON_MODEL
+ $(error NEURON_MODEL is not set. Please choose a neuron model to compile)
+ else
+ NEURON_MODEL := $(call strip_source_dirs,$(NEURON_MODEL))
+ endif
+
+ ifndef NEURON_MODEL_H
+ $(error NEURON_MODEL_H is not set. Please select a neuron model header file)
+ else
+ NEURON_MODEL_H := $(call replace_source_dirs,$(NEURON_MODEL_H))
+ endif
+
+ ifndef INPUT_TYPE_H
+ $(error INPUT_TYPE_H is not set. Please select an input type header file)
+ else
+ INPUT_TYPE_H := $(call replace_source_dirs,$(INPUT_TYPE_H))
+ endif
+
+ ifndef THRESHOLD_TYPE_H
+ $(error THRESHOLD_TYPE_H is not set. Please select a threshold type header file)
+ else
+ THRESHOLD_TYPE_H := $(call replace_source_dirs,$(THRESHOLD_TYPE_H))
+ endif
+
+ ifndef SYNAPSE_TYPE_H
+ $(error SYNAPSE_TYPE_H is not set. Please select a synapse type header file)
+ else
+ SYNAPSE_TYPE_H := $(call replace_source_dirs,$(SYNAPSE_TYPE_H))
+ endif
+
+ NEURON_INCLUDES := \
+ -include $(NEURON_MODEL_H) \
+ -include $(SYNAPSE_TYPE_H) \
+ -include $(INPUT_TYPE_H) \
+ -include $(THRESHOLD_TYPE_H) \
+ -include $(ADDITIONAL_INPUT_H) \
+ -include $(NEURON_IMPL_H)
+ endif
+endif
+
+OTHER_SOURCES_CONVERTED := $(call strip_source_dirs,$(OTHER_SOURCES))
+
+# List all the sources relative to one of SOURCE_DIRS
+SOURCES = neuron/c_main_neurons.c \
+ neuron/neuron.c \
+ neuron/neuron_recording.c \
+ neuron/plasticity/synapse_dynamics_remote.c \
+ $(NEURON_MODEL) $(OTHER_SOURCES_CONVERTED)
+
+include $(SPINN_DIRS)/make/local.mk
+
+FEC_OPT = $(OTIME)
+
+$(BUILD_DIR)neuron/c_main_neurons.o: $(MODIFIED_DIR)neuron/c_main_neurons.c
+ #c_main.c
+ -@mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(NEURON_DEBUG) $(CFLAGS) -o $@ $<
+
+$(BUILD_DIR)neuron/neuron.o: $(MODIFIED_DIR)neuron/neuron.c $(NEURON_MODEL_H) \
+ $(SYNAPSE_TYPE_H)
+ # neuron.o
+ -@mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(NEURON_DEBUG) $(CFLAGS) $(NEURON_INCLUDES) -o $@ $<
+
+$(BUILD_DIR)neuron/neuron_recording.o: $(MODIFIED_DIR)neuron/neuron_recording.c $(NEURON_MODEL_H) \
+ $(SYNAPSE_TYPE_H)
+ # neuron_recording.o
+ -@mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(NEURON_DEBUG) $(CFLAGS) $(NEURON_INCLUDES) -o $@ $<
+
+.PRECIOUS: $(MODIFIED_DIR)%.c $(MODIFIED_DIR)%.h $(LOG_DICT_FILE) $(EXTRA_PRECIOUS)
diff --git a/neural_modelling/makefiles/synapse_only/Makefile b/neural_modelling/makefiles/synapse_only/Makefile
new file mode 100644
index 0000000000..201814a60c
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/Makefile
@@ -0,0 +1,33 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+MODELS = synapses\
+ synapses_stdp_mad_pair_additive\
+ synapses_stdp_mad_pair_multiplicative\
+ synapses_stdp_mad_nearest_pair_additive\
+ synapses_stdp_mad_nearest_pair_multiplicative\
+ synapses_stdp_mad_recurrent_dual_fsm_multiplicative\
+ synapses_stdp_mad_pfister_triplet_additive\
+ synapses_stdp_mad_vogels_2011_additive\
+ synapses_structural_random_distance_weight\
+ synapses_structural_last_neuron_distance_weight\
+ synapses_stdp_mad_pair_additive_structural_random_distance_weight\
+ synapses_stdp_mad_pair_additive_structural_last_neuron_distance_weight
+
+all:
+ for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done
+
+clean:
+ for d in $(MODELS); do $(MAKE) -C $$d clean || exit $$?; done
diff --git a/neural_modelling/makefiles/synapse_only/synapse_build.mk b/neural_modelling/makefiles/synapse_only/synapse_build.mk
new file mode 100644
index 0000000000..5a55664a29
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapse_build.mk
@@ -0,0 +1,267 @@
+# See Notes in sPyNNaker/neural_modelling/CHANGES_April_2018
+
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+# If SPINN_DIRS is not defined, this is an error!
+ifndef SPINN_DIRS
+ $(error SPINN_DIRS is not set. Please define SPINN_DIRS (possibly by running "source setup" in the spinnaker package folder))
+endif
+
+# If NEURAL_MODELLING_DIRS is not defined, this is an error!
+ifndef NEURAL_MODELLING_DIRS
+ $(error NEURAL_MODELLING_DIRS is not set. Please define NEURAL_MODELLING_DIRS (possibly by running "source setup" in the sPyNNaker folder))
+endif
+#Check NEURAL_MODELLING_DIRS
+MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+CHECK_PATH := $(NEURAL_MODELLING_DIRS)/makefiles/synapse_only/synapse_build.mk
+ifneq ($(CHECK_PATH), $(MAKEFILE_PATH))
+ $(error Please check NEURAL_MODELLING_DIRS as based on that this file is at $(CHECK_PATH) when it is actually at $(MAKEFILE_PATH))
+endif
+
+# Set logging levels
+ifeq ($(SPYNNAKER_DEBUG), DEBUG)
+ SYNAPSE_DEBUG = LOG_DEBUG
+ PLASTIC_DEBUG = LOG_DEBUG
+endif
+
+ifndef SYNAPSE_DEBUG
+ SYNAPSE_DEBUG = LOG_INFO
+endif
+
+ifndef PLASTIC_DEBUG
+ PLASTIC_DEBUG = LOG_INFO
+endif
+
+#POPULATION_TABLE_IMPL := fixed
+POPULATION_TABLE_IMPL := binary_search
+
+# Add source directory
+
+# Define the directories
+# Path flag to replace with the modified dir (abspath drops the final /)
+NEURON_DIR := $(abspath $(NEURAL_MODELLING_DIRS)/src)
+MODIFIED_DIR :=$(dir $(abspath $(NEURON_DIR)))modified_src/
+SOURCE_DIRS += $(NEURON_DIR)
+
+# Define a rule to find the source directory of the given file.
+# This attempts to find each of SOURCE_DIRS within the given file name; the
+# first one that matches is then returned. If none match, an empty string
+# will be returned.
+define get_source_dir#(file)
+$(firstword $(strip $(foreach d, $(sort $(SOURCE_DIRS)), $(findstring $(d), $(1)))))
+endef
+
+# Define rule to strip any SOURCE_DIRS from source_file to allow use via local.mk.
+# If no match is found, the value is returned untouched
+# (though this will probably fail later).
+define strip_source_dirs#(source_file)
+$(or $(patsubst $(call get_source_dir, $(1))/%,%,$(1)), $(1))
+endef
+
+# Define a rule to replace any SOURCE_DIRS from header_file with the modified_src folder.
+define replace_source_dirs#(header_file)
+$(patsubst $(call get_source_dir, $(1))%, $(dir $(call get_source_dir, $(1)))modified_src%, $(1))
+endef
+
+# Need to build each neuron seperately or complier gets confused
+# BUILD_DIR and APP_OUTPUT_DIR end with a / for historictical/ shared reasons
+ifndef BUILD_DIR
+ BUILD_DIR := $(NEURAL_MODELLING_DIRS)/builds/$(APP)/
+endif
+ifndef APP_OUTPUT_DIR
+ APP_OUTPUT_DIR := $(NEURAL_MODELLING_DIRS)/../spynnaker/pyNN/model_binaries
+endif
+
+ifndef SYNAPSE_DYNAMICS
+ $(error SYNAPSE_DYNAMICS is not set. Please select a synapse dynamics implementation)
+else
+ SYNAPSE_DYNAMICS_C := $(call replace_source_dirs,$(SYNAPSE_DYNAMICS))
+ SYNAPSE_DYNAMICS := $(call strip_source_dirs,$(SYNAPSE_DYNAMICS))
+ SYNAPSE_DYNAMICS_O := $(BUILD_DIR)$(SYNAPSE_DYNAMICS:%.c=%.o)
+
+ SYNAPSE_DYNAMICS_STATIC := neuron/plasticity/synapse_dynamics_static_impl.c
+ STDP_ENABLED = 0
+ ifneq ($(SYNAPSE_DYNAMICS), $(SYNAPSE_DYNAMICS_STATIC))
+ STDP_ENABLED = 1
+
+ ifndef TIMING_DEPENDENCE_H
+ $(error TIMING_DEPENDENCE_H is not set which is required when SYNAPSE_DYNAMICS ($(SYNAPSE_DYNAMICS_C)) != $(SYNAPSE_DYNAMICS_STATIC))
+ endif
+ ifndef WEIGHT_DEPENDENCE_H
+ $(error WEIGHT_DEPENDENCE_H is not set which is required when SYNAPSE_DYNAMICS ($(SYNAPSE_DYNAMICS_C)) != $(SYNAPSE_DYNAMICS_STATIC))
+ endif
+ endif
+endif
+
+ifdef WEIGHT_DEPENDENCE
+ WEIGHT_DEPENDENCE_H := $(call replace_source_dirs,$(WEIGHT_DEPENDENCE_H))
+ WEIGHT_DEPENDENCE_C := $(call replace_source_dirs,$(WEIGHT_DEPENDENCE))
+ WEIGHT_DEPENDENCE := $(call strip_source_dirs,$(WEIGHT_DEPENDENCE))
+ WEIGHT_DEPENDENCE_O := $(BUILD_DIR)$(WEIGHT_DEPENDENCE:%.c=%.o)
+endif
+
+ifdef TIMING_DEPENDENCE
+ TIMING_DEPENDENCE_H := $(call replace_source_dirs,$(TIMING_DEPENDENCE_H))
+ TIMING_DEPENDENCE_C := $(call replace_source_dirs,$(TIMING_DEPENDENCE))
+ TIMING_DEPENDENCE := $(call strip_source_dirs,$(TIMING_DEPENDENCE))
+ TIMING_DEPENDENCE_O := $(BUILD_DIR)$(TIMING_DEPENDENCE:%.c=%.o)
+endif
+
+SYNGEN_ENABLED = 1
+ifndef SYNAPTOGENESIS_DYNAMICS
+ SYNAPTOGENESIS_DYNAMICS := neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c
+ SYNAPTOGENESIS_DYNAMICS_C := $(MODIFIED_DIR)$(SYNAPTOGENESIS_DYNAMICS)
+ SYNGEN_ENABLED = 0
+else
+ SYNAPTOGENESIS_DYNAMICS_C := $(call replace_source_dirs,$(SYNAPTOGENESIS_DYNAMICS))
+ SYNAPTOGENESIS_DYNAMICS := $(call strip_source_dirs,$(SYNAPTOGENESIS_DYNAMICS))
+ ifndef PARTNER_SELECTION
+ $(error PARTNER_SELECTION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set)
+ endif
+ ifndef FORMATION
+ $(error FORMATION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set)
+ endif
+ ifndef ELIMINATION
+ $(error ELIMINATION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set)
+ endif
+endif
+SYNAPTOGENESIS_DYNAMICS_O := $(BUILD_DIR)$(SYNAPTOGENESIS_DYNAMICS:%.c=%.o)
+
+ifdef PARTNER_SELECTION
+ PARTNER_SELECTION_H := $(call replace_source_dirs,$(PARTNER_SELECTION_H))
+ PARTNER_SELECTION_C := $(call replace_source_dirs,$(PARTNER_SELECTION))
+ PARTNER_SELECTION := $(call strip_source_dirs,$(PARTNER_SELECTION))
+ PARTNER_SELECTION_O := $(BUILD_DIR)$(PARTNER_SELECTION:%.c=%.o)
+endif
+
+ifdef FORMATION
+ FORMATION_H := $(call replace_source_dirs,$(FORMATION_H))
+ FORMATION_C := $(call replace_source_dirs,$(FORMATION))
+ FORMATION := $(call strip_source_dirs,$(FORMATION))
+ FORMATION_O := $(BUILD_DIR)$(FORMATION:%.c=%.o)
+endif
+
+ifdef ELIMINATION
+ ELIMINATION_H := $(call replace_source_dirs,$(ELIMINATION_H))
+ ELIMINATION_C := $(call replace_source_dirs,$(ELIMINATION))
+ ELIMINATION := $(call strip_source_dirs,$(ELIMINATION))
+ ELIMINATION_O := $(BUILD_DIR)$(ELIMINATION:%.c=%.o)
+endif
+
+OTHER_SOURCES_CONVERTED := $(call strip_source_dirs,$(OTHER_SOURCES))
+
+# List all the sources relative to one of SOURCE_DIRS
+SOURCES = neuron/c_main_synapses.c \
+ neuron/synapses.c \
+ neuron/direct_synapses.c \
+ neuron/spike_processing_fast.c \
+ neuron/population_table/population_table_$(POPULATION_TABLE_IMPL)_impl.c \
+ $(SYNAPSE_DYNAMICS) $(WEIGHT_DEPENDENCE) \
+ $(TIMING_DEPENDENCE) $(SYNAPTOGENESIS_DYNAMICS) \
+ $(PARTNER_SELECTION) $(FORMATION) $(ELIMINATION) $(OTHER_SOURCES_CONVERTED)
+
+include $(SPINN_DIRS)/make/local.mk
+
+FEC_OPT = $(OTIME)
+
+# Extra compile options
+DO_COMPILE = $(CC) -DLOG_LEVEL=$(SYNAPSE_DEBUG) $(CFLAGS) -DSTDP_ENABLED=$(STDP_ENABLED)
+
+$(BUILD_DIR)neuron/synapses.o: $(MODIFIED_DIR)neuron/synapses.c
+ #synapses.c
+ -@mkdir -p $(dir $@)
+ $(DO_COMPILE) -o $@ $<
+
+$(BUILD_DIR)neuron/direct_synapses.o: $(MODIFIED_DIR)neuron/direct_synapses.c
+ #direct_synapses.c
+ -mkdir -p $(dir $@)
+ $(DO_COMPILE) -o $@ $<
+
+$(BUILD_DIR)neuron/spike_processing_fast.o: $(MODIFIED_DIR)neuron/spike_processing_fast.c
+ #spike_processing_fast.c
+ -@mkdir -p $(dir $@)
+ $(DO_COMPILE) -o $@ $<
+
+$(BUILD_DIR)neuron/population_table/population_table_binary_search_impl.o: $(MODIFIED_DIR)neuron/population_table/population_table_binary_search_impl.c
+ #population_table/population_table_binary_search_impl.c
+ -@mkdir -p $(dir $@)
+ $(DO_COMPILE) -o $@ $<
+
+SYNGEN_INCLUDES:=
+ifeq ($(SYNGEN_ENABLED), 1)
+ SYNGEN_INCLUDES:= -include $(PARTNER_SELECTION_H) -include $(FORMATION_H) -include $(ELIMINATION_H)
+endif
+
+#STDP Build rules If and only if STDP used
+ifeq ($(STDP_ENABLED), 1)
+ STDP_INCLUDES:= -include $(WEIGHT_DEPENDENCE_H) -include $(TIMING_DEPENDENCE_H)
+ STDP_COMPILE = $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -DSTDP_ENABLED=$(STDP_ENABLED) -DSYNGEN_ENABLED=$(SYNGEN_ENABLED) $(STDP_INCLUDES)
+
+ $(SYNAPSE_DYNAMICS_O): $(SYNAPSE_DYNAMICS_C)
+ # SYNAPSE_DYNAMICS_O stdp
+ -@mkdir -p $(dir $@)
+ $(STDP_COMPILE) -o $@ $<
+
+ $(SYNAPTOGENESIS_DYNAMICS_O): $(SYNAPTOGENESIS_DYNAMICS_C)
+ # SYNAPTOGENESIS_DYNAMICS_O stdp
+ -@mkdir -p $(dir $@)
+ $(STDP_COMPILE) $(SYNGEN_INCLUDES) -o $@ $<
+
+ $(BUILD_DIR)neuron/plasticity/common/post_events.o: $(MODIFIED_DIR)neuron/plasticity/common/post_events.c
+ # plasticity/common/post_events.c
+ -@mkdir -p $(dir $@)
+ $(STDP_COMPILE) -o $@ $<
+
+else
+ $(SYNAPTOGENESIS_DYNAMICS_O): $(SYNAPTOGENESIS_DYNAMICS_C)
+ # SYNAPTOGENESIS_DYNAMICS_O without stdp
+ -@mkdir -p $(dir $@)
+ $(DO_COMPILE) $(SYNGEN_INCLUDES) -o $@ $<
+
+ $(SYNAPSE_DYNAMICS_O): $(SYNAPSE_DYNAMICS_C)
+ # SYNAPSE_DYNAMICS_O without stdp
+ -@mkdir -p $(dir $@)
+ $(DO_COMPILE) -o $@ $<
+
+endif
+
+$(WEIGHT_DEPENDENCE_O): $(WEIGHT_DEPENDENCE_C)
+ # WEIGHT_DEPENDENCE_O
+ -@mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $<
+
+$(TIMING_DEPENDENCE_O): $(TIMING_DEPENDENCE_C) $(WEIGHT_DEPENDENCE_H)
+ # TIMING_DEPENDENCE_O
+ -@mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) \
+ -include $(WEIGHT_DEPENDENCE_H) -o $@ $<
+
+$(PARTNER_SELECTION_O): $(PARTNER_SELECTION_C)
+ # PARTNER_SELECTION_O
+ -mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $<
+
+$(FORMATION_O): $(FORMATION_C)
+ # FORMATION_O
+ -mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $<
+
+$(ELIMINATION_O): $(ELIMINATION_C)
+ # ELIMINATION_O
+ -mkdir -p $(dir $@)
+ $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $<
+
+.PRECIOUS: $(MODIFIED_DIR)%.c $(MODIFIED_DIR)%.h $(LOG_DICT_FILE) $(EXTRA_PRECIOUS)
diff --git a/neural_modelling/makefiles/synapse_only/synapses/Makefile b/neural_modelling/makefiles/synapse_only/synapses/Makefile
new file mode 100644
index 0000000000..04b643fc53
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses/Makefile
@@ -0,0 +1,20 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_additive/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_additive/Makefile
new file mode 100644
index 0000000000..04bee4f936
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_additive/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_mad_stdp.c
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_nearest_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_nearest_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_multiplicative/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_multiplicative/Makefile
new file mode 100644
index 0000000000..186a9c53e7
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_nearest_pair_multiplicative/Makefile
@@ -0,0 +1,23 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_nearest_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_nearest_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive/Makefile
new file mode 100644
index 0000000000..a3c2e81bdd
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_mad_stdp.c
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile
new file mode 100644
index 0000000000..1360c098e2
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile
@@ -0,0 +1,31 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h
+SYNAPTOGENESIS_DYNAMICS = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
+PARTNER_SELECTION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection_impl.c
+PARTNER_SELECTION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection_impl.h
+FORMATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.c
+FORMATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.h
+ELIMINATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.c
+ELIMINATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_random_distance_weight/Makefile
new file mode 100644
index 0000000000..fcf60bb95c
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_additive_structural_random_distance_weight/Makefile
@@ -0,0 +1,31 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h
+SYNAPTOGENESIS_DYNAMICS = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
+PARTNER_SELECTION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection_impl.c
+PARTNER_SELECTION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection_impl.h
+FORMATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.c
+FORMATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.h
+ELIMINATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.c
+ELIMINATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_multiplicative/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_multiplicative/Makefile
new file mode 100644
index 0000000000..85e0d5ecb2
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pair_multiplicative/Makefile
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_mad_stdp.c
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pair_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pfister_triplet_additive/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pfister_triplet_additive/Makefile
new file mode 100644
index 0000000000..7e2a16ab73
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_pfister_triplet_additive/Makefile
@@ -0,0 +1,24 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_recurrent_dual_fsm_multiplicative/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_recurrent_dual_fsm_multiplicative/Makefile
new file mode 100644
index 0000000000..579bd26f67
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_recurrent_dual_fsm_multiplicative/Makefile
@@ -0,0 +1,23 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_recurrent_dual_fsm_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_recurrent_dual_fsm_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_vogels_2011_additive/Makefile b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_vogels_2011_additive/Makefile
new file mode 100644
index 0000000000..ba914a3995
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_stdp_mad_vogels_2011_additive/Makefile
@@ -0,0 +1,24 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_vogels_2011_impl.c
+TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_vogels_2011_impl.h
+WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c
+WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_structural_last_neuron_distance_weight/Makefile b/neural_modelling/makefiles/synapse_only/synapses_structural_last_neuron_distance_weight/Makefile
new file mode 100644
index 0000000000..d18fed67b9
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_structural_last_neuron_distance_weight/Makefile
@@ -0,0 +1,27 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c
+SYNAPTOGENESIS_DYNAMICS = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
+PARTNER_SELECTION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection_impl.c
+PARTNER_SELECTION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection_impl.h
+FORMATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.c
+FORMATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.h
+ELIMINATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.c
+ELIMINATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/makefiles/synapse_only/synapses_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/synapse_only/synapses_structural_random_distance_weight/Makefile
new file mode 100644
index 0000000000..1d515b13d7
--- /dev/null
+++ b/neural_modelling/makefiles/synapse_only/synapses_structural_random_distance_weight/Makefile
@@ -0,0 +1,27 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+APP = $(notdir $(CURDIR))
+
+SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c
+SYNAPTOGENESIS_DYNAMICS = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
+PARTNER_SELECTION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection_impl.c
+PARTNER_SELECTION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection_impl.h
+FORMATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.c
+FORMATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/formation/formation_distance_dependent_impl.h
+ELIMINATION = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.c
+ELIMINATION_H = $(NEURON_DIR)/neuron/structural_plasticity/synaptogenesis/elimination/elimination_random_by_weight_impl.h
+
+include ../synapse_build.mk
diff --git a/neural_modelling/src/delay_extension/delay_extension.c b/neural_modelling/src/delay_extension/delay_extension.c
index 65e44528e3..2d276b4603 100644
--- a/neural_modelling/src/delay_extension/delay_extension.c
+++ b/neural_modelling/src/delay_extension/delay_extension.c
@@ -226,10 +226,10 @@ static bool read_parameters(struct delay_parameters *params) {
log_info("\t parrot neurons = %u, neuron bit field words = %u,"
" num delay stages = %u, num delay slots = %u (pot = %u),"
- " num delay slots mask = %08x",
+ " num delay slots mask = %08x, n delay in a stage = %u",
num_neurons, neuron_bit_field_words,
num_delay_stages, num_delay_slots, num_delay_slots_pot,
- num_delay_slots_mask);
+ num_delay_slots_mask, n_delay_in_a_stage);
// Create array containing a bitfield specifying whether each neuron should
// emit spikes after each delay stage
diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c
index 9469fe9b16..cfa2abf995 100644
--- a/neural_modelling/src/neuron/c_main.c
+++ b/neural_modelling/src/neuron/c_main.c
@@ -33,71 +33,60 @@
* @image html spynnaker_c_code_flow.png
*/
-#include
+#include "c_main_neuron_common.h"
+#include "c_main_synapse_common.h"
+#include "c_main_common.h"
#include "regions.h"
-#include "neuron.h"
-#include "synapses.h"
-#include "spike_processing.h"
-#include "population_table/population_table.h"
-#include "plasticity/synapse_dynamics.h"
-#include "structural_plasticity/synaptogenesis_dynamics.h"
#include "profile_tags.h"
-#include "direct_synapses.h"
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/* validates that the model being compiled does indeed contain a application
- * magic number*/
-#ifndef APPLICATION_NAME_HASH
-#error APPLICATION_NAME_HASH was undefined. Make sure you define this\
- constant
-#endif
-
-//! The provenance information written on application shutdown.
-struct neuron_provenance {
- //! A count of presynaptic events.
- uint32_t n_pre_synaptic_events;
- //! A count of synaptic saturations.
- uint32_t n_synaptic_weight_saturations;
- //! A count of the times that the synaptic input circular buffers overflowed
- uint32_t n_input_buffer_overflows;
- //! The current time.
- uint32_t current_timer_tick;
- //! The number of STDP weight saturations.
- uint32_t n_plastic_synaptic_weight_saturations;
- uint32_t n_ghost_pop_table_searches;
- uint32_t n_failed_bitfield_reads;
- uint32_t n_dmas_complete;
- uint32_t n_spikes_processed;
- uint32_t n_invalid_master_pop_table_hits;
- uint32_t n_filtered_by_bitfield;
- //! The number of rewirings performed.
- uint32_t n_rewires;
- uint32_t n_packets_dropped_from_lateness;
- uint32_t spike_processing_get_max_filled_input_buffer_size;
- //! the number of times the TDMA fully missed its slots
- uint32_t n_tdma_mises;
+#include "spike_processing.h"
+
+//! The combined provenance from synapses and neurons
+struct combined_provenance {
+ struct neuron_provenance neuron_provenance;
+ struct synapse_provenance synapse_provenance;
+ struct spike_processing_provenance spike_processing_provenance;
//! Maximum backgrounds queued
uint32_t max_backgrounds_queued;
//! Background queue overloads
uint32_t n_background_queue_overloads;
};
-//! values for the priority for each callback
+//! Identify the priorities for all tasks
typedef enum callback_priorities {
MC = -1, DMA = 0, USER = 0, TIMER = 0, SDP = 1, BACKGROUND = 1
} callback_priorities;
-//! The number of regions that are to be used for recording
-#define NUMBER_OF_REGIONS_TO_RECORD 5
+//! From the regions, extract those that are common
+const struct common_regions COMMON_REGIONS = {
+ .system = SYSTEM_REGION,
+ .provenance = PROVENANCE_DATA_REGION,
+ .profiler = PROFILER_REGION,
+ .recording = RECORDING_REGION
+};
+
+//! Identify the priorities of the common tasks
+const struct common_priorities COMMON_PRIORITIES = {
+ .sdp = SDP,
+ .dma = DMA,
+ .timer = TIMER
+};
-// Globals
+//! From the regions, extract those that are neuron-specific
+const struct neuron_regions NEURON_REGIONS = {
+ .neuron_params = NEURON_PARAMS_REGION,
+ .neuron_recording = NEURON_RECORDING_REGION
+};
+
+//! From the regions, extract those that are synapse-specific
+const struct synapse_regions SYNAPSE_REGIONS = {
+ .synapse_params = SYNAPSE_PARAMS_REGION,
+ .direct_matrix = DIRECT_MATRIX_REGION,
+ .synaptic_matrix = SYNAPTIC_MATRIX_REGION,
+ .pop_table = POPULATION_TABLE_REGION,
+ .synapse_dynamics = SYNAPSE_DYNAMICS_REGION,
+ .structural_dynamics = STRUCTURAL_DYNAMICS_REGION,
+ .bitfield_filter = BIT_FIELD_FILTER_REGION
+};
//! The current timer tick value.
// the timer tick callback returning the same value.
@@ -112,20 +101,8 @@ static uint32_t simulation_ticks = 0;
//! Determines if this model should run for infinite time
static uint32_t infinite_run;
-//! Timer callbacks since last rewiring
-static int32_t last_rewiring_time = 0;
-
-//! Rewiring period represented as an integer
-static int32_t rewiring_period = 0;
-
-//! Flag representing whether rewiring is enabled
-static bool rewiring = false;
-
-//! Count the number of rewiring attempts
-static uint32_t count_rewire_attempts = 0;
-
-//! The number of neurons on the core
-static uint32_t n_neurons;
+//! The recording flags indicating if anything is recording
+static uint32_t recording_flags = 0;
//! The number of background tasks queued / running
static uint32_t n_backgrounds_queued = 0;
@@ -136,194 +113,60 @@ static uint32_t n_background_overloads = 0;
//! The maximum number of background tasks queued
static uint32_t max_backgrounds_queued = 0;
-//! timer count for tdma of certain models; exported
-uint global_timer_count;
-
+//! The ring buffers to be used in the simulation
+static weight_t *ring_buffers;
//! \brief Callback to store provenance data (format: neuron_provenance).
//! \param[out] provenance_region: Where to write the provenance data
static void c_main_store_provenance_data(address_t provenance_region) {
- log_debug("writing other provenance data");
- struct neuron_provenance *prov = (void *) provenance_region;
-
- // store the data into the provenance data region
- prov->n_pre_synaptic_events = synapses_get_pre_synaptic_events();
- prov->n_synaptic_weight_saturations = synapses_saturation_count;
- prov->n_input_buffer_overflows = spike_processing_get_buffer_overflows();
- prov->current_timer_tick = time;
- prov->n_plastic_synaptic_weight_saturations =
- synapse_dynamics_get_plastic_saturation_count();
- prov->n_ghost_pop_table_searches = ghost_pop_table_searches;
- prov->n_failed_bitfield_reads = failed_bit_field_reads;
- prov->n_dmas_complete = spike_processing_get_dma_complete_count();
- prov->n_spikes_processed = spike_processing_get_spike_processing_count();
- prov->n_invalid_master_pop_table_hits = invalid_master_pop_hits;
- prov->n_filtered_by_bitfield = bit_field_filtered_packets;
- prov->n_rewires = spike_processing_get_successful_rewires();
- prov->n_packets_dropped_from_lateness =
- spike_processing_get_n_packets_dropped_from_lateness();
- prov->spike_processing_get_max_filled_input_buffer_size =
- spike_processing_get_max_filled_input_buffer_size();
- prov->n_tdma_mises = tdma_processing_times_behind();
+ struct combined_provenance *prov = (void *) provenance_region;
prov->n_background_queue_overloads = n_background_overloads;
prov->max_backgrounds_queued = max_backgrounds_queued;
-
- log_debug("finished other provenance data");
-}
-
-//! \brief Initialises the model by reading in the regions and checking
-//! recording data.
-//! \return True if it successfully initialised, false otherwise
-static bool initialise(void) {
- log_debug("Initialise: started");
-
- // Get the address this core's DTCM data starts at from SRAM
- data_specification_metadata_t *ds_regions =
- data_specification_get_data_address();
-
- // Read the header
- if (!data_specification_read_header(ds_regions)) {
- return false;
- }
-
- // Get the timing details and set up the simulation interface
- if (!simulation_initialise(
- data_specification_get_region(SYSTEM_REGION, ds_regions),
- APPLICATION_NAME_HASH, &timer_period, &simulation_ticks,
- &infinite_run, &time, SDP, DMA)) {
- return false;
- }
- simulation_set_provenance_function(
- c_main_store_provenance_data,
- data_specification_get_region(PROVENANCE_DATA_REGION, ds_regions));
-
- // Set up the neurons
- uint32_t n_synapse_types;
- uint32_t incoming_spike_buffer_size;
- uint32_t n_regions_used;
- if (!neuron_initialise(
- data_specification_get_region(NEURON_PARAMS_REGION, ds_regions),
- data_specification_get_region(NEURON_RECORDING_REGION, ds_regions),
- &n_neurons, &n_synapse_types, &incoming_spike_buffer_size,
- &n_regions_used)) {
- return false;
- }
-
- // Set up the synapses
- uint32_t *ring_buffer_to_input_buffer_left_shifts;
- bool clear_input_buffers_of_late_packets_init;
- if (!synapses_initialise(
- data_specification_get_region(SYNAPSE_PARAMS_REGION, ds_regions),
- n_neurons, n_synapse_types,
- &ring_buffer_to_input_buffer_left_shifts,
- &clear_input_buffers_of_late_packets_init)) {
- return false;
- }
-
- // set up direct synapses
- address_t direct_synapses_address;
- if (!direct_synapses_initialise(
- data_specification_get_region(DIRECT_MATRIX_REGION, ds_regions),
- &direct_synapses_address)) {
- return false;
- }
-
- // Set up the population table
- uint32_t row_max_n_words;
- if (!population_table_initialise(
- data_specification_get_region(POPULATION_TABLE_REGION, ds_regions),
- data_specification_get_region(SYNAPTIC_MATRIX_REGION, ds_regions),
- direct_synapses_address, &row_max_n_words)) {
- return false;
- }
- // Set up the synapse dynamics
- if (!synapse_dynamics_initialise(
- data_specification_get_region(SYNAPSE_DYNAMICS_REGION, ds_regions),
- n_neurons, n_synapse_types,
- ring_buffer_to_input_buffer_left_shifts)) {
- return false;
- }
-
- // Set up structural plasticity dynamics
- if (!synaptogenesis_dynamics_initialise(data_specification_get_region(
- STRUCTURAL_DYNAMICS_REGION, ds_regions), &n_regions_used)) {
- return false;
- }
-
- rewiring_period = synaptogenesis_rewiring_period();
- rewiring = rewiring_period != -1;
-
- if (!spike_processing_initialise(
- row_max_n_words, MC, USER, incoming_spike_buffer_size,
- clear_input_buffers_of_late_packets_init, n_regions_used)) {
- return false;
- }
-
- // Setup profiler
- profiler_init(data_specification_get_region(PROFILER_REGION, ds_regions));
-
- // Do bitfield configuration last to only use any unused memory
- if (!population_table_load_bitfields(
- data_specification_get_region(BIT_FIELD_FILTER_REGION, ds_regions))) {
- return false;
- }
-
- print_post_to_pre_entry();
-
- log_debug("Initialise: finished");
- return true;
+ store_neuron_provenance(&prov->neuron_provenance);
+ store_synapse_provenance(&prov->synapse_provenance);
+ spike_processing_store_provenance(&prov->spike_processing_provenance);
}
//! \brief the function to call when resuming a simulation
void resume_callback(void) {
- data_specification_metadata_t *ds_regions =
- data_specification_get_data_address();
+
+ // Reset recording
+ recording_reset();
// try resuming neuron
- if (!neuron_resume(
- data_specification_get_region(NEURON_PARAMS_REGION, ds_regions))) {
+ if (!neuron_resume()) {
log_error("failed to resume neuron.");
rt_error(RTE_SWERR);
}
- // If the time has been reset to zero then the ring buffers need to be
- // flushed in case there is a delayed spike left over from a previous run
+ // Resume synapses
// NOTE: at reset, time is set to UINT_MAX ahead of timer_callback(...)
- if ((time+1) == 0) {
- synapses_flush_ring_buffers();
- }
+ synapses_resume(time + 1);
+}
+//! Process the ring buffers for the next time step
+static inline void process_ring_buffers(void) {
+ uint32_t first_index = synapse_row_get_first_ring_buffer_index(
+ time, synapse_type_index_bits, synapse_delay_mask);
+ neuron_transfer(&ring_buffers[first_index]);
+
+ // Print the neuron inputs.
+ #if LOG_LEVEL >= LOG_DEBUG
+ log_debug("Inputs");
+ neuron_print_inputs();
+ #endif // LOG_LEVEL >= LOG_DEBUG
}
-//! \brief Background activites called from timer
+//! \brief Background activities called from timer
//! \param timer_count the number of times this call back has been
//! executed since start of simulation
//! \param[in] local_time: The time step being executed
void background_callback(uint timer_count, uint local_time) {
- global_timer_count = timer_count;
profiler_write_entry_disable_irq_fiq(PROFILER_ENTER | PROFILER_TIMER);
- last_rewiring_time++;
-
- // This is the part where I save the input and output indices
- // from the circular buffer
- // If time == 0 as well as output == input == 0 then no rewire is
- // supposed to happen. No spikes yet
log_debug("Timer tick %u \n", local_time);
- // Then do rewiring
- if (rewiring &&
- ((last_rewiring_time >= rewiring_period && !synaptogenesis_is_fast())
- || synaptogenesis_is_fast())) {
- last_rewiring_time = 0;
- // put flag in spike processing to do synaptic rewiring
- if (synaptogenesis_is_fast()) {
- spike_processing_do_rewiring(rewiring_period);
- } else {
- spike_processing_do_rewiring(1);
- }
- count_rewire_attempts++;
- }
+ spike_processing_do_rewiring(synaptogenesis_n_updates());
// Now do neuron time step update
neuron_do_timestep_update(local_time, timer_count);
@@ -340,16 +183,18 @@ void timer_callback(uint timer_count, UNUSED uint unused) {
// Disable interrupts to stop DMAs and MC getting in the way of this bit
uint32_t state = spin1_int_disable();
+ // Increment time step
time++;
// Clear any outstanding spikes
spike_processing_clear_input_buffer(time);
+ // Next bit without DMA, but with MC
spin1_mode_restore(state);
state = spin1_irq_disable();
- // Also do synapses timestep update, as this is time-critical
- synapses_do_timestep_update(time);
+ // Process ring buffers for the inputs from last time step
+ process_ring_buffers();
/* if a fixed number of simulation ticks that were specified at startup
* then do reporting for finishing */
@@ -358,22 +203,16 @@ void timer_callback(uint timer_count, UNUSED uint unused) {
// Enter pause and resume state to avoid another tick
simulation_handle_pause_resume(resume_callback);
- log_debug("Completed a run");
+ // Pause neuron processing
+ neuron_pause();
- // rewrite neuron params to SDRAM for reading out if needed
- data_specification_metadata_t *ds_regions =
- data_specification_get_data_address();
- neuron_pause(data_specification_get_region(NEURON_PARAMS_REGION, ds_regions));
-
- profiler_write_entry_disable_irq_fiq(PROFILER_EXIT | PROFILER_TIMER);
-
- profiler_finalise();
+ // Pause common functions
+ common_pause(recording_flags);
// Subtract 1 from the time so this tick gets done again on the next
// run
time--;
- log_debug("Rewire tries = %d", count_rewire_attempts);
simulation_ready_to_read();
spin1_mode_restore(state);
return;
@@ -393,23 +232,69 @@ void timer_callback(uint timer_count, UNUSED uint unused) {
spin1_mode_restore(state);
}
-//! \brief The entry point for this model.
-void c_main(void) {
+//! \brief Initialises the model by reading in the regions and checking
+//! recording data.
+//! \return True if it successfully initialised, false otherwise
+static bool initialise(void) {
+ log_debug("Initialise: started");
- // initialise the model
- if (!initialise()) {
- rt_error(RTE_API);
+ data_specification_metadata_t *ds_regions;
+ if (!initialise_common_regions(
+ &timer_period, &simulation_ticks, &infinite_run, &time,
+ &recording_flags, c_main_store_provenance_data, timer_callback,
+ COMMON_REGIONS, COMMON_PRIORITIES, &ds_regions)) {
+ return false;
}
- // Start the time at "-1" so that the first tick will be 0
- time = UINT32_MAX;
+ // Setup neurons
+ uint32_t n_rec_regions_used;
+ if (!initialise_neuron_regions(
+ ds_regions, NEURON_REGIONS, &n_rec_regions_used)) {
+ return false;
+ }
+
+ // Setup synapses
+ uint32_t incoming_spike_buffer_size;
+ bool clear_input_buffer_of_late_packets;
+ uint32_t row_max_n_words;
+ if (!initialise_synapse_regions(
+ ds_regions, SYNAPSE_REGIONS, &ring_buffers, &row_max_n_words,
+ &incoming_spike_buffer_size,
+ &clear_input_buffer_of_late_packets, &n_rec_regions_used)) {
+ return false;
+ }
+
+ // Setup spike processing
+ if (!spike_processing_initialise(
+ row_max_n_words, MC, USER, incoming_spike_buffer_size,
+ clear_input_buffer_of_late_packets, n_rec_regions_used)) {
+ return false;
+ }
+
+ // Do bitfield configuration last to only use any unused memory
+ if (!population_table_load_bitfields(data_specification_get_region(
+ SYNAPSE_REGIONS.bitfield_filter, ds_regions))) {
+ return false;
+ }
// Set timer tick (in microseconds)
log_debug("setting timer tick callback for %d microseconds", timer_period);
spin1_set_timer_tick(timer_period);
- // Set up the timer tick callback (others are handled elsewhere)
- spin1_callback_on(TIMER_TICK, timer_callback, TIMER);
+ log_debug("Initialise: finished");
+ return true;
+}
+
+//! \brief The entry point for this model.
+void c_main(void) {
+
+ // Start the time at "-1" so that the first tick will be 0
+ time = UINT32_MAX;
+
+ // initialise the model
+ if (!initialise()) {
+ rt_error(RTE_API);
+ }
simulation_run();
}
diff --git a/neural_modelling/src/neuron/c_main_common.h b/neural_modelling/src/neuron/c_main_common.h
new file mode 100644
index 0000000000..9e29236021
--- /dev/null
+++ b/neural_modelling/src/neuron/c_main_common.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017-2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+/* validates that the model being compiled does indeed contain a application
+ * magic number*/
+#ifndef APPLICATION_NAME_HASH
+#error APPLICATION_NAME_HASH was undefined. Make sure you define this\
+ constant
+#endif
+
+//! The identifiers of the regions used by all simulation cores
+struct common_regions {
+ //! Data for general simulation setup
+ uint32_t system;
+ //! Where provenance data will be stored
+ uint32_t provenance;
+ //! Where profile data will be read and stored
+ uint32_t profiler;
+ //! Where recording metadata will be read and stored
+ uint32_t recording;
+};
+
+//! The callback priorities used by all simulation cores
+struct common_priorities {
+ //! The SDP callback priority
+ uint32_t sdp;
+ //! The DMA callback priority
+ uint32_t dma;
+ //! The timer callback priority
+ uint32_t timer;
+};
+
+//! \brief Read data from simulation regions used by all binaries and set up
+//! \param[out] timer_period: Returns the timer period of the simulation
+//! \param[in] simulation_ticks:
+//! Pointer to the variable that will hold the timer period, which is
+//! updated by the simulation interface
+//! \param[in] infinite_run:
+//! Pointer to the variable that will hold whether this is an infinite run,
+//! which is updated by the simulation interface
+//! \param[in] time:
+//! Pointer to the variable that will hold the current simulation time,
+//! which is updated by the simulation interface
+//! \param[out] recording_flags:
+//! Returns the flags that indicate which regions are being recorded
+//! \param[in] store_provenance_function:
+//! Callback to store additional provenance custom to this model
+//! \param[in] timer_callback:
+//! Callback on a timer tick
+//! \param[in] regions: The identifiers of the various regions to be read
+//! \param[in] priorities: The interrupt priorities of the signals
+//! \param[out] ds_regions: Returns the data specification regions
+//! \return boolean indicating success (True) or failure (False)
+static inline bool initialise_common_regions(
+ uint32_t *timer_period, uint32_t *simulation_ticks,
+ uint32_t *infinite_run, uint32_t *time, uint32_t *recording_flags,
+ prov_callback_t store_provenance_function, callback_t timer_callback,
+ struct common_regions regions, struct common_priorities priorities,
+ data_specification_metadata_t **ds_regions) {
+
+ // Get the address this core's DTCM data starts at from SRAM
+ *ds_regions = data_specification_get_data_address();
+
+ // Read the header
+ if (!data_specification_read_header(*ds_regions)) {
+ return false;
+ }
+
+ // Get the timing details and set up the simulation interface
+ if (!simulation_initialise(
+ data_specification_get_region(regions.system, *ds_regions),
+ APPLICATION_NAME_HASH, timer_period, simulation_ticks,
+ infinite_run, time, priorities.sdp, priorities.dma)) {
+ return false;
+ }
+ simulation_set_provenance_function(
+ store_provenance_function,
+ data_specification_get_region(regions.provenance, *ds_regions));
+
+ // Setup profiler
+ profiler_init(data_specification_get_region(regions.profiler, *ds_regions));
+
+ // Setup recording
+ void *rec_addr = data_specification_get_region(regions.recording, *ds_regions);
+ if (!recording_initialize(&rec_addr, recording_flags)) {
+ return false;
+ }
+
+ if (timer_callback) {
+
+ // Set up the timer tick callback (others are handled elsewhere)
+ spin1_callback_on(TIMER_TICK, timer_callback, priorities.timer);
+ }
+
+ return true;
+}
+
+//! \brief Do things required when the simulation is paused
+//! \param[in] recording_flags: Flags returned from initialise_common_regions
+static inline void common_pause(uint32_t recording_flags) {
+
+ // Finalise any recordings that are in progress
+ if (recording_flags > 0) {
+ log_debug("updating recording regions");
+ recording_finalise();
+ }
+
+ profiler_finalise();
+}
diff --git a/neural_modelling/src/neuron/c_main_neuron_common.h b/neural_modelling/src/neuron/c_main_neuron_common.h
new file mode 100644
index 0000000000..2be26d37a3
--- /dev/null
+++ b/neural_modelling/src/neuron/c_main_neuron_common.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017-2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#include
+#include
+
+#include
+#include
+
+#include "neuron.h"
+
+//! The provenance information provided by neurons
+struct neuron_provenance {
+ //! The current time.
+ uint32_t current_timer_tick;
+ //! The number of times a TDMA slot was missed
+ uint32_t n_tdma_mises;
+ //! Earliest send time within any time step
+ uint32_t earliest_send;
+ //! Latest send time within any time step
+ uint32_t latest_send;
+};
+
+//! The region IDs used by the neuron processing
+struct neuron_regions {
+ //! The neuron parameters
+ uint32_t neuron_params;
+ //! The neuron recording details
+ uint32_t neuron_recording;
+};
+
+//! Declare that time exists
+extern uint32_t time;
+
+//! Latest time in a timestep that any neuron has sent a spike
+extern uint32_t latest_send_time;
+
+//! Earliest time in a timestep that any neuron has sent a spike
+extern uint32_t earliest_send_time;
+
+//! \brief Callback to store neuron provenance data (format: neuron_provenance).
+//! \param[out] prov: The data structure to store provenance data in
+static inline void store_neuron_provenance(struct neuron_provenance *prov) {
+ prov->current_timer_tick = time;
+ prov->n_tdma_mises = tdma_processing_times_behind();
+ prov->earliest_send = earliest_send_time;
+ prov->latest_send = latest_send_time;
+}
+
+//! \brief Read data to set up neuron processing
+//! \param[in] ds_regions: Pointer to region position data
+//! \param[in] regions: The indices of the regions to be read
+//! \param[out] n_rec_regions_used: The number of recording regions used
+//! \return a boolean indicating success (True) or failure (False)
+static inline bool initialise_neuron_regions(
+ data_specification_metadata_t *ds_regions,
+ struct neuron_regions regions, uint32_t *n_rec_regions_used) {
+
+ // Set up the neurons
+ if (!neuron_initialise(
+ data_specification_get_region(regions.neuron_params, ds_regions),
+ data_specification_get_region(regions.neuron_recording, ds_regions),
+ n_rec_regions_used)) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/neural_modelling/src/neuron/c_main_neurons.c b/neural_modelling/src/neuron/c_main_neurons.c
new file mode 100644
index 0000000000..2d4e7cfe42
--- /dev/null
+++ b/neural_modelling/src/neuron/c_main_neurons.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2017-2019 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+/*!
+ * @dir
+ * @brief Implementation of simulator for a single neural population on a
+ * SpiNNaker CPU core. Or rather of a slice of a population.
+ *
+ * @file
+ * @brief This file contains the main function of the application framework,
+ * which the application programmer uses to configure and run applications.
+ *
+ * This is the main entrance class for most of the neural models. The following
+ * Figure shows how all of the c code
+ * interacts with each other and what classes
+ * are used to represent over arching logic
+ * (such as plasticity, spike processing, utilities, synapse types, models)
+ *
+ * @image html spynnaker_c_code_flow.png
+ */
+
+#include "c_main_neuron_common.h"
+#include "c_main_common.h"
+#include "profile_tags.h"
+#include "dma_common.h"
+#include
+#include
+
+//! values for the priority for each callback
+typedef enum callback_priorities {
+ DMA = -2, SDP = 0, TIMER = 0
+} callback_priorities;
+
+//! Overall regions to be used by the neuron core
+enum regions {
+ SYSTEM_REGION,
+ PROVENANCE_DATA_REGION,
+ PROFILER_REGION,
+ RECORDING_REGION,
+ NEURON_PARAMS_REGION,
+ NEURON_RECORDING_REGION,
+ SDRAM_PARAMS_REGION
+};
+
+//! From the regions, select those that are common
+const struct common_regions COMMON_REGIONS = {
+ .system = SYSTEM_REGION,
+ .provenance = PROVENANCE_DATA_REGION,
+ .profiler = PROFILER_REGION,
+ .recording = RECORDING_REGION
+};
+
+//! Identify the priority of certain tasks
+const struct common_priorities COMMON_PRIORITIES = {
+ .sdp = SDP,
+ .dma = DMA,
+ .timer = TIMER
+};
+
+/**
+ * From the regions, select those that are used for neuron-specific things
+ */
+const struct neuron_regions NEURON_REGIONS = {
+ .neuron_params = NEURON_PARAMS_REGION,
+ .neuron_recording = NEURON_RECORDING_REGION
+};
+
+//! A region of SDRAM used to transfer synapses
+struct sdram_config {
+ //! The start address of the input data to be transferred
+ uint8_t *address;
+ //! The size of the input data to be transferred per core
+ uint32_t size_in_bytes;
+ //! The number of neurons
+ uint32_t n_neurons;
+ //! The number of synapse types
+ uint32_t n_synapse_types;
+ //! The number of synapse cores feeding into here
+ uint32_t n_synapse_cores;
+ //! The number of bits needed for the neurons
+ uint32_t synapse_index_bits;
+};
+
+//! Provenance for this specific core
+struct neurons_provenance {
+ uint32_t n_timer_overruns;
+};
+
+//! The number of buffers for synaptic data (one processing, one in progress)
+#define N_SYNAPTIC_BUFFERS 2
+
+//! The current timer tick value.
+// the timer tick callback returning the same value.
+uint32_t time;
+
+//! timer tick period (in microseconds)
+static uint32_t timer_period;
+
+//! The number of timer ticks to run for before being expected to exit
+static uint32_t simulation_ticks = 0;
+
+//! Determines if this model should run for infinite time
+static uint32_t infinite_run;
+
+//! The recording flags indicating if anything is recording
+static uint32_t recording_flags = 0;
+
+//! The SDRAM input configuration data
+static struct sdram_config sdram_inputs;
+
+//! The inputs from the various synapse cores
+static weight_t *synaptic_contributions[N_SYNAPTIC_BUFFERS];
+
+//! The timer overruns
+static uint32_t timer_overruns = 0;
+
+//! All the synaptic contributions for adding up in 2 formats
+static union {
+ uint32_t *as_int;
+ weight_t *as_weight;
+} all_synaptic_contributions;
+
+
+//! \brief Callback to store provenance data (format: neuron_provenance).
+//! \param[out] provenance_region: Where to write the provenance data
+static void store_provenance_data(address_t provenance_region) {
+ struct neuron_provenance *prov = (void *) provenance_region;
+ store_neuron_provenance(prov);
+ struct neurons_provenance *n_prov = (void *) &prov[1];
+ n_prov->n_timer_overruns = timer_overruns;
+
+}
+
+//! \brief the function to call when resuming a simulation
+void resume_callback(void) {
+
+ // Reset recording
+ recording_reset();
+
+ // try resuming neuron
+ if (!neuron_resume()) {
+ log_error("failed to resume neuron.");
+ rt_error(RTE_SWERR);
+ }
+}
+
+//! \brief Add up all the synaptic contributions into a global buffer
+//! \param[in] syns The weights to be added
+static inline void sum(weight_t *syns) {
+ uint32_t n_words = sdram_inputs.size_in_bytes >> 2;
+ const uint32_t *src = (const uint32_t *) syns;
+ uint32_t *tgt = all_synaptic_contributions.as_int;
+ for (uint32_t i = n_words; i > 0; i--) {
+ *tgt++ += *src++;
+ }
+}
+
+//! \brief Timer interrupt callback
+//! \param[in] timer_count: the number of times this call back has been
+//! executed since start of simulation
+//! \param[in] unused: unused parameter kept for API consistency
+void timer_callback(uint timer_count, UNUSED uint unused) {
+
+ profiler_write_entry_disable_irq_fiq(PROFILER_ENTER | PROFILER_TIMER);
+
+ uint32_t start_time = tc[T1_COUNT];
+
+ time++;
+
+ log_debug("Timer tick %u \n", time);
+
+ /* if a fixed number of simulation ticks that were specified at startup
+ * then do reporting for finishing */
+ if (simulation_is_finished()) {
+
+ // Enter pause and resume state to avoid another tick
+ simulation_handle_pause_resume(resume_callback);
+
+ // Pause neuron processing
+ neuron_pause();
+
+ // Pause common functions
+ common_pause(recording_flags);
+
+ profiler_write_entry_disable_irq_fiq(PROFILER_EXIT | PROFILER_TIMER);
+
+ // Subtract 1 from the time so this tick gets done again on the next
+ // run
+ time--;
+
+ simulation_ready_to_read();
+ return;
+ }
+
+ // Start the transfer of the first part of the weight data
+ uint8_t *sdram = sdram_inputs.address;
+ uint32_t write_index = 0;
+ uint32_t read_index = 0;
+
+ // Start the first DMA
+ do_fast_dma_read(sdram, synaptic_contributions[write_index],
+ sdram_inputs.size_in_bytes);
+ write_index = !write_index;
+
+ for (uint32_t i = 0; i < sdram_inputs.n_synapse_cores; i++) {
+ // Wait for the last DMA to complete
+ wait_for_dma_to_complete();
+
+ // Start the next DMA if not finished
+ if (i + 1 < sdram_inputs.n_synapse_cores) {
+ sdram += sdram_inputs.size_in_bytes;
+ do_fast_dma_read(sdram, synaptic_contributions[write_index],
+ sdram_inputs.size_in_bytes);
+ write_index = !write_index;
+ }
+
+ // Add in the contributions from the last read item
+ sum(synaptic_contributions[read_index]);
+ read_index = !read_index;
+ }
+
+ neuron_transfer(all_synaptic_contributions.as_weight);
+
+ // Now do neuron time step update
+ neuron_do_timestep_update(time, timer_count);
+
+ uint32_t end_time = tc[T1_COUNT];
+ if (end_time > start_time) {
+ timer_overruns += 1;
+ }
+
+ profiler_write_entry_disable_irq_fiq(PROFILER_EXIT | PROFILER_TIMER);
+}
+
+//! \brief Initialises the model by reading in the regions and checking
+//! recording data.
+//! \return True if it successfully initialised, false otherwise
+static bool initialise(void) {
+ log_debug("Initialise: started");
+
+ data_specification_metadata_t *ds_regions;
+ if (!initialise_common_regions(
+ &timer_period, &simulation_ticks, &infinite_run, &time,
+ &recording_flags, store_provenance_data, timer_callback,
+ COMMON_REGIONS, COMMON_PRIORITIES, &ds_regions)) {
+ return false;
+ }
+
+ // Setup neurons
+ uint32_t n_rec_regions_used;
+ if (!initialise_neuron_regions(
+ ds_regions, NEURON_REGIONS, &n_rec_regions_used)) {
+ return false;
+ }
+
+ // Setup for reading synaptic inputs at start of each time step
+ struct sdram_config * sdram_config = data_specification_get_region(
+ SDRAM_PARAMS_REGION, ds_regions);
+ spin1_memcpy(&sdram_inputs, sdram_config, sizeof(struct sdram_config));
+
+ log_info("Transferring ring buffers from 0x%08x for %d neurons (%d bits) "
+ "and %d synapse types from %d cores using %d bytes per core",
+ sdram_inputs.address, sdram_inputs.n_neurons,
+ sdram_inputs.synapse_index_bits, sdram_inputs.n_synapse_types,
+ sdram_inputs.n_synapse_cores, sdram_inputs.size_in_bytes);
+
+ uint32_t n_words = sdram_inputs.size_in_bytes >> 2;
+ for (uint32_t i = 0; i < N_SYNAPTIC_BUFFERS; i++) {
+ synaptic_contributions[i] = spin1_malloc(sdram_inputs.size_in_bytes);
+ if (synaptic_contributions == NULL) {
+ log_error("Could not allocate %d bytes for synaptic contributions %d",
+ sdram_inputs.size_in_bytes, i);
+ return false;
+ }
+ for (uint32_t j = 0; j < n_words; j++) {
+ synaptic_contributions[i][j] = 0;
+ }
+ }
+ all_synaptic_contributions.as_int = spin1_malloc(sdram_inputs.size_in_bytes);
+ if (all_synaptic_contributions.as_int == NULL) {
+ log_error("Could not allocate %d bytes for all synaptic contributions",
+ sdram_inputs.size_in_bytes);
+ return false;
+ }
+ for (uint32_t j = 0; j < n_words; j++) {
+ all_synaptic_contributions.as_int[j] = 0;
+ }
+ uint32_t *sdram_word = (void *) sdram_inputs.address;
+ for (uint32_t i = 0; i < sdram_inputs.n_synapse_cores; i++) {
+ for (uint32_t j = 0; j < n_words; j++) {
+ *(sdram_word++) = 0;
+ }
+ }
+ // Set timer tick (in microseconds)
+ log_debug("setting timer tick callback for %d microseconds", timer_period);
+ spin1_set_timer_tick(timer_period);
+
+ log_debug("Initialise: finished");
+ return true;
+}
+
+//! \brief The entry point for this model.
+void c_main(void) {
+
+ // Start the time at "-1" so that the first tick will be 0
+ time = UINT32_MAX;
+
+ // initialise the model
+ if (!initialise()) {
+ rt_error(RTE_API);
+ }
+
+ simulation_run();
+}
diff --git a/neural_modelling/src/neuron/c_main_synapse_common.h b/neural_modelling/src/neuron/c_main_synapse_common.h
new file mode 100644
index 0000000000..37bea7c51c
--- /dev/null
+++ b/neural_modelling/src/neuron/c_main_synapse_common.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017-2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+
+#include
+#include
+#include "synapses.h"
+#include "population_table/population_table.h"
+#include "plasticity/synapse_dynamics.h"
+#include "structural_plasticity/synaptogenesis_dynamics.h"
+#include "direct_synapses.h"
+
+//! The region IDs used by synapse processing
+struct synapse_regions {
+ //! The parameters of the synapse processing
+ uint32_t synapse_params;
+ //! The direct or single matrix to be copied to DTCM
+ uint32_t direct_matrix;
+ //! The table to map from keys to memory addresses
+ uint32_t pop_table;
+ //! The SDRAM-based matrix of source spikes to target neurons
+ uint32_t synaptic_matrix;
+ //! Configuration for STDP
+ uint32_t synapse_dynamics;
+ //! Configuration for structural plasticity
+ uint32_t structural_dynamics;
+ //! The filters to avoid DMA transfers of empty rows
+ uint32_t bitfield_filter;
+};
+
+//! The provenance information for synaptic processing
+struct synapse_provenance {
+ //! A count of presynaptic events.
+ uint32_t n_pre_synaptic_events;
+ //! A count of synaptic saturations.
+ uint32_t n_synaptic_weight_saturations;
+ //! The number of STDP weight saturations.
+ uint32_t n_plastic_synaptic_weight_saturations;
+ //! The number of population table searches that had no match
+ uint32_t n_ghost_pop_table_searches;
+ //! The number of bit field reads that couldn't be read in due to DTCM limits
+ uint32_t n_failed_bitfield_reads;
+ //! The number of population table searches that found an "invalid" entry
+ uint32_t n_invalid_master_pop_table_hits;
+ //! The number of spikes that a bit field filtered, stopping a DMA
+ uint32_t n_filtered_by_bitfield;
+};
+
+//! \brief Callback to store synapse provenance data (format: synapse_provenance).
+//! \param[out] prov: The data structure to store the provenance data in
+static inline void store_synapse_provenance(struct synapse_provenance *prov) {
+
+ // store the data into the provenance data region
+ prov->n_pre_synaptic_events = synapses_get_pre_synaptic_events();
+ prov->n_synaptic_weight_saturations = synapses_saturation_count;
+ prov->n_plastic_synaptic_weight_saturations =
+ synapse_dynamics_get_plastic_saturation_count();
+ prov->n_ghost_pop_table_searches = ghost_pop_table_searches;
+ prov->n_failed_bitfield_reads = failed_bit_field_reads;
+ prov->n_invalid_master_pop_table_hits = invalid_master_pop_hits;
+ prov->n_filtered_by_bitfield = bit_field_filtered_packets;
+}
+
+//! \brief Read data to set up synapse processing
+//! \param[in] ds_regions: Pointer to region position data
+//! \param[in] regions: The indices of the regions to be read
+//! \param[out] ring_buffers: The ring buffers that will be used
+//! \param[out] row_max_n_words: Pointer to receive the maximum number of words
+//! in a synaptic row
+//! \param[out] incoming_spike_buffer_size: Pointer to receive the size to make
+//! the spike input buffer
+//! \param[out] clear_input_buffer_of_late_packets: Pointer to receive whether
+//! to clear the input buffer
+//! each time step
+//! \param[in/out] n_recording_regions_used: Pointer to variable which starts
+//! as the next recording region to use
+//! and is updated with regions used here
+//! \return a boolean indicating success (True) or failure (False)
+static inline bool initialise_synapse_regions(
+ data_specification_metadata_t *ds_regions,
+ struct synapse_regions regions, weight_t **ring_buffers,
+ uint32_t *row_max_n_words,
+ uint32_t *incoming_spike_buffer_size,
+ bool *clear_input_buffer_of_late_packets,
+ uint32_t *n_recording_regions_used) {
+ // Set up the synapses
+ uint32_t *ring_buffer_to_input_buffer_left_shifts;
+ uint32_t n_neurons;
+ uint32_t n_synapse_types;
+ if (!synapses_initialise(
+ data_specification_get_region(regions.synapse_params, ds_regions),
+ &n_neurons, &n_synapse_types, ring_buffers,
+ &ring_buffer_to_input_buffer_left_shifts,
+ clear_input_buffer_of_late_packets,
+ incoming_spike_buffer_size)) {
+ return false;
+ }
+
+ // set up direct synapses
+ address_t direct_synapses_address;
+ if (!direct_synapses_initialise(
+ data_specification_get_region(regions.direct_matrix, ds_regions),
+ &direct_synapses_address)) {
+ return false;
+ }
+
+ // Set up the population table
+ if (!population_table_initialise(
+ data_specification_get_region(regions.pop_table, ds_regions),
+ data_specification_get_region(regions.synaptic_matrix, ds_regions),
+ direct_synapses_address, row_max_n_words)) {
+ return false;
+ }
+ // Set up the synapse dynamics
+ if (!synapse_dynamics_initialise(
+ data_specification_get_region(regions.synapse_dynamics, ds_regions),
+ n_neurons, n_synapse_types, ring_buffer_to_input_buffer_left_shifts)) {
+ return false;
+ }
+
+ // Set up structural plasticity dynamics
+ if (!synaptogenesis_dynamics_initialise(data_specification_get_region(
+ regions.structural_dynamics, ds_regions), n_recording_regions_used)) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/neural_modelling/src/neuron/c_main_synapses.c b/neural_modelling/src/neuron/c_main_synapses.c
new file mode 100644
index 0000000000..19e859a036
--- /dev/null
+++ b/neural_modelling/src/neuron/c_main_synapses.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017-2019 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+/*!
+ * @dir
+ * @brief Implementation of simulator for a single neural population on a
+ * SpiNNaker CPU core. Or rather of a slice of a population.
+ *
+ * @file
+ * @brief This file contains the main function of the application framework,
+ * which the application programmer uses to configure and run applications.
+ *
+ * This is the main entrance class for most of the neural models. The following
+ * Figure shows how all of the c code
+ * interacts with each other and what classes
+ * are used to represent over arching logic
+ * (such as plasticity, spike processing, utilities, synapse types, models)
+ *
+ * @image html spynnaker_c_code_flow.png
+ */
+
+#include "c_main_synapse_common.h"
+#include "c_main_common.h"
+#include "spike_processing_fast.h"
+#include "structural_plasticity/synaptogenesis_dynamics.h"
+#include
+
+//! values for the priority for each callback
+typedef enum callback_priorities {
+ MC = -1, DMA = -2, TIMER = 0, SDP = 0
+} callback_priorities;
+
+//! Provenance data region layout
+struct provenance_data {
+ struct synapse_provenance synapse_prov;
+ struct spike_processing_fast_provenance spike_processing_prov;
+};
+
+//! Overall regions used by the synapse core
+enum regions {
+ SYSTEM_REGION,
+ PROVENANCE_DATA_REGION,
+ PROFILER_REGION,
+ RECORDING_REGION,
+ SYNAPSE_PARAMS_REGION,
+ DIRECT_MATRIX_REGION,
+ SYNAPTIC_MATRIX_REGION,
+ POPULATION_TABLE_REGION,
+ SYNAPSE_DYNAMICS_REGION,
+ STRUCTURAL_DYNAMICS_REGION,
+ BIT_FIELD_FILTER_REGION,
+ SDRAM_PARAMS_REGION,
+ KEY_REGION
+};
+
+//! From the regions, select those that are common
+const struct common_regions COMMON_REGIONS = {
+ .system = SYSTEM_REGION,
+ .provenance = PROVENANCE_DATA_REGION,
+ .profiler = PROFILER_REGION,
+ .recording = RECORDING_REGION
+};
+
+//! Identify the priority of common tasks
+const struct common_priorities COMMON_PRIORITIES = {
+ .sdp = SDP,
+ .dma = DMA,
+ .timer = TIMER
+};
+
+//! From the regions, select those that are used for synapse-specific things
+const struct synapse_regions SYNAPSE_REGIONS = {
+ .synapse_params = SYNAPSE_PARAMS_REGION,
+ .direct_matrix = DIRECT_MATRIX_REGION,
+ .synaptic_matrix = SYNAPTIC_MATRIX_REGION,
+ .pop_table = POPULATION_TABLE_REGION,
+ .synapse_dynamics = SYNAPSE_DYNAMICS_REGION,
+ .structural_dynamics = STRUCTURAL_DYNAMICS_REGION,
+ .bitfield_filter = BIT_FIELD_FILTER_REGION
+};
+
+//! The current timer tick value.
+// the timer tick callback returning the same value.
+uint32_t time;
+
+//! timer tick period (in microseconds)
+static uint32_t timer_period;
+
+//! The number of timer ticks to run for before being expected to exit
+static uint32_t simulation_ticks = 0;
+
+//! Determines if this model should run for infinite time
+static uint32_t infinite_run;
+
+//! The recording flags indicating if anything is recording
+static uint32_t recording_flags = 0;
+
+//! \brief Callback to store provenance data (format: neuron_provenance).
+//! \param[out] provenance_region: Where to write the provenance data
+static void store_provenance_data(address_t provenance_region) {
+ struct provenance_data *prov = (void *) provenance_region;
+ store_synapse_provenance(&prov->synapse_prov);
+ spike_processing_fast_store_provenance(&prov->spike_processing_prov);
+}
+
+//! \brief the function to call when resuming a simulation
+void resume_callback(void) {
+
+ // Reset recording
+ recording_reset();
+
+ // Resume synapses
+ // NOTE: at reset, time is set to UINT_MAX ahead of timer_callback(...)
+ synapses_resume(time + 1);
+}
+
+void timer_callback(UNUSED uint unused0, UNUSED uint unused1) {
+ time++;
+ if (simulation_is_finished()) {
+ // Enter pause and resume state to avoid another tick
+ simulation_handle_pause_resume(resume_callback);
+
+ // Pause common functions
+ common_pause(recording_flags);
+
+ simulation_ready_to_read();
+ return;
+ }
+
+ uint32_t n_rewires = synaptogenesis_n_updates();
+ spike_processing_fast_time_step_loop(time, n_rewires);
+}
+
+//! \brief Initialises the model by reading in the regions and checking
+//! recording data.
+//! \return True if it successfully initialised, false otherwise
+static bool initialise(void) {
+ log_debug("Initialise: started");
+
+ data_specification_metadata_t *ds_regions;
+ if (!initialise_common_regions(
+ &timer_period, &simulation_ticks, &infinite_run, &time,
+ &recording_flags, store_provenance_data, timer_callback,
+ COMMON_REGIONS, COMMON_PRIORITIES, &ds_regions)) {
+ return false;
+ }
+
+ // Setup synapses
+ uint32_t incoming_spike_buffer_size;
+ uint32_t row_max_n_words;
+ bool clear_input_buffer_of_late_packets;
+ weight_t *ring_buffers;
+ uint32_t n_rec_regions_used = 0;
+ if (!initialise_synapse_regions(
+ ds_regions, SYNAPSE_REGIONS, &ring_buffers, &row_max_n_words,
+ &incoming_spike_buffer_size,
+ &clear_input_buffer_of_late_packets, &n_rec_regions_used)) {
+ return false;
+ }
+
+ // Setup for writing synaptic inputs at the end of each run
+ struct sdram_config *sdram_config = data_specification_get_region(
+ SDRAM_PARAMS_REGION, ds_regions);
+ struct key_config *key_config = data_specification_get_region(
+ KEY_REGION, ds_regions);
+
+ if (!spike_processing_fast_initialise(
+ row_max_n_words, incoming_spike_buffer_size,
+ clear_input_buffer_of_late_packets, n_rec_regions_used, MC,
+ *sdram_config, *key_config, ring_buffers)) {
+ return false;
+ }
+
+ // Do bitfield configuration last to only use any unused memory
+ if (!population_table_load_bitfields(data_specification_get_region(
+ SYNAPSE_REGIONS.bitfield_filter, ds_regions))) {
+ return false;
+ }
+
+ // Set timer tick (in microseconds)
+ log_debug("setting timer tick callback for %d microseconds", timer_period);
+ spin1_set_timer_tick(timer_period);
+
+ recording_reset();
+
+ log_debug("Initialise: finished");
+ return true;
+}
+
+//! \brief The entry point for this model.
+void c_main(void) {
+
+ // initialise the model
+ if (!initialise()) {
+ rt_error(RTE_API);
+ }
+
+ simulation_run();
+}
diff --git a/neural_modelling/src/neuron/dma_common.h b/neural_modelling/src/neuron/dma_common.h
new file mode 100644
index 0000000000..8876afd6d8
--- /dev/null
+++ b/neural_modelling/src/neuron/dma_common.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#ifndef _DMA_COMMON_H_
+#define _DMA_COMMON_H_
+#include
+#include
+#include
+
+//! Value of the masked DMA status register when transfer is complete
+#define DMA_COMPLETE 0x400
+
+//! Mask to apply to the DMA status register to check for completion
+#define DMA_CHECK_MASK 0x401
+
+//! DMA write flags
+static const uint32_t DMA_WRITE_FLAGS =
+ DMA_WIDTH << 24 | DMA_BURST_SIZE << 21 | DMA_WRITE << 19;
+
+//! DMA read flags
+static const uint32_t DMA_READ_FLAGS =
+ DMA_WIDTH << 24 | DMA_BURST_SIZE << 21 | DMA_READ << 19;
+
+//! \brief Is there a DMA currently running?
+//! \return True if there is something transferring now.
+static inline bool dma_done(void) {
+ return (dma[DMA_STAT] & DMA_CHECK_MASK) == DMA_COMPLETE;
+}
+
+//! \brief Start the DMA doing a write; the write may not be finished at the
+//! end of this call.
+//! \param[in] tcm_address: The local DTCM address to read the data from
+//! \param[in] system_address: The SDRAM address to write the data to
+//! \param[in] n_bytes: The number of bytes to be written from DTCM to SDRAM
+static inline void do_fast_dma_write(void *tcm_address, void *system_address,
+ uint32_t n_bytes) {
+#if LOG_LEVEL >= LOG_DEBUG
+ // Useful for checking when things are going wrong, but shouldn't be
+ // needed in normal code
+ uint32_t stat = dma[DMA_STAT];
+ if (stat & 0x1FFFFF) {
+ log_error("DMA pending or in progress on write: 0x%08x", stat);
+ rt_error(RTE_SWERR);
+ }
+#endif
+ uint32_t desc = DMA_WRITE_FLAGS | n_bytes;
+ dma[DMA_ADRS] = (uint32_t) system_address;
+ dma[DMA_ADRT] = (uint32_t) tcm_address;
+ dma[DMA_DESC] = desc;
+}
+
+//! \brief Start the DMA doing a read; the read may not be finished at the end
+//! of this call.
+//! \param[in] system_address: The SDRAM address to read the data from
+//! \param[in] tcm_address: The DTCM address to write the data to
+//! \param[in] n_bytes: The number of bytes to be read from SDRAM to DTCM
+static inline void do_fast_dma_read(void *system_address, void *tcm_address,
+ uint32_t n_bytes) {
+#if LOG_LEVEL >= LOG_DEBUG
+ // Useful for checking when things are going wrong, but shouldn't be
+ // needed in normal code
+ uint32_t stat = dma[DMA_STAT];
+ if (stat & 0x1FFFFF) {
+ log_error("DMA pending or in progress on read: 0x%08x", stat);
+ rt_error(RTE_SWERR);
+ }
+#endif
+ uint32_t desc = DMA_READ_FLAGS | n_bytes;
+ dma[DMA_ADRS] = (uint32_t) system_address;
+ dma[DMA_ADRT] = (uint32_t) tcm_address;
+ dma[DMA_DESC] = desc;
+}
+
+//! \brief Wait for a DMA transfer to complete.
+static inline void wait_for_dma_to_complete(void) {
+#if LOG_LEVEL >= LOG_DEBUG
+ // Useful for checking when things are going wrong, but shouldn't be
+ // needed in normal code
+ uint32_t n_loops = 0;
+ while (!dma_done() && n_loops < 10000) {
+ n_loops++;
+ }
+ if (!dma_done()) {
+ log_error("Timeout on DMA loop: DMA stat = 0x%08x!", dma[DMA_STAT]);
+ rt_error(RTE_SWERR);
+ }
+#else
+ // This is the normal loop, done without checking
+ while (!dma_done()) {
+ continue;
+ }
+#endif
+ dma[DMA_CTRL] = 0x8;
+}
+
+
+//! \brief Cancel any outstanding DMA transfers
+static inline void cancel_dmas(void) {
+ dma[DMA_CTRL] = 0x3F;
+ while (dma[DMA_STAT] & 0x1) {
+ continue;
+ }
+ dma[DMA_CTRL] = 0xD;
+ while (dma[DMA_CTRL] & 0xD) {
+ continue;
+ }
+}
+
+#endif
diff --git a/neural_modelling/src/neuron/implementations/neuron_impl.h b/neural_modelling/src/neuron/implementations/neuron_impl.h
index 488008996a..1154858197 100644
--- a/neural_modelling/src/neuron/implementations/neuron_impl.h
+++ b/neural_modelling/src/neuron/implementations/neuron_impl.h
@@ -23,6 +23,7 @@
#define _NEURON_IMPL_H_
#include
+#include
//! \brief Initialise the particular implementation of the data
//! \param[in] n_neurons: The number of neurons
@@ -46,10 +47,10 @@ static void neuron_impl_load_neuron_parameters(
//! \brief Do the timestep update for the particular implementation
//! \param[in] neuron_index: The index of the neuron to update
+//! \param[in] time: The time step of the update
//! \param[in] external_bias: External input to be applied to the neuron
-//! \return True if a spike has occurred
-static bool neuron_impl_do_timestep_update(
- index_t neuron_index, input_t external_bias);
+static void neuron_impl_do_timestep_update(
+ uint32_t timer_count, uint32_t time, uint32_t n_neurons);
//! \brief Stores neuron parameters back into SDRAM
//! \param[out] address: the address in SDRAM to start the store
diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h b/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h
index bdeb452581..ae39afcd41 100644
--- a/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h
+++ b/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h
@@ -110,9 +110,6 @@ static synapse_param_t *neuron_synapse_shaping_params;
//! The number of steps to run per timestep
static uint n_steps_per_timestep;
-//! setup from c_main
-extern uint global_timer_count;
-
#ifndef SOMETIMES_UNUSED
#define SOMETIMES_UNUSED __attribute__((unused))
#endif // !SOMETIMES_UNUSED
@@ -334,131 +331,131 @@ static bool _test_will_fire(packet_firing_data_t *packet_firing) {
SOMETIMES_UNUSED // Marked unused as only used sometimes
//! \brief Do the timestep update for the particular implementation
//! \param[in] neuron_index: The index of the neuron to update
+//! \param[in] time: The time step of the update
//! \param[in] external_bias: External input to be applied to the neuron
-//! \return True if a spike has occurred
-static bool neuron_impl_do_timestep_update(index_t neuron_index,
- input_t external_bias) {
- // Get the neuron itself
- neuron_t *this_neuron = &neuron_array[neuron_index];
-
- // Get the input_type parameters and voltage for this neuron
- input_type_t *input_types = &input_type_array[neuron_index];
-
- // Get threshold and additional input parameters for this neuron
- packet_firing_data_t *the_packet_firing =
- &packet_firing_array[neuron_index];
- additional_input_t *additional_inputs =
- &additional_input_array[neuron_index];
- synapse_param_t *the_synapse_type =
- &neuron_synapse_shaping_params[neuron_index];
-
- // Store whether the neuron has spiked
- bool will_fire = false;
-
- // Loop however many times requested; do this in reverse for efficiency,
- // and because the index doesn't actually matter
- for (uint32_t i = n_steps_per_timestep; i > 0; i--) {
- // Get the voltage
- state_t soma_voltage = neuron_model_get_membrane_voltage(this_neuron);
-
- // Get the exc and inh values from the synapses
- input_t exc_values[NUM_EXCITATORY_RECEPTORS];
- input_t *exc_syn_values =
- synapse_types_get_excitatory_input(exc_values, the_synapse_type);
- input_t inh_values[NUM_INHIBITORY_RECEPTORS];
- input_t *inh_syn_values =
- synapse_types_get_inhibitory_input(inh_values, the_synapse_type);
-
- // Call functions to obtain exc_input and inh_input
- input_t *exc_input_values = input_type_get_input_value(
- exc_syn_values, input_types, NUM_EXCITATORY_RECEPTORS);
- input_t *inh_input_values = input_type_get_input_value(
- inh_syn_values, input_types, NUM_INHIBITORY_RECEPTORS);
-
- // Sum g_syn contributions from all receptors for recording
- REAL total_exc = 0;
- REAL total_inh = 0;
-
- for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) {
- total_exc += exc_input_values[i];
- }
- for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) {
- total_inh += inh_input_values[i];
- }
-
- // Do recording if on the first step
- if (i == n_steps_per_timestep) {
- neuron_recording_record_accum(
- V_RECORDING_INDEX, neuron_index, soma_voltage);
- neuron_recording_record_accum(
- GSYN_EXC_RECORDING_INDEX, neuron_index, total_exc);
- neuron_recording_record_accum(
- GSYN_INH_RECORDING_INDEX, neuron_index, total_inh);
- }
-
- // Call functions to convert exc_input and inh_input to current
- input_type_convert_excitatory_input_to_current(
- exc_input_values, input_types, soma_voltage);
- input_type_convert_inhibitory_input_to_current(
- inh_input_values, input_types, soma_voltage);
-
- external_bias += additional_input_get_input_value_as_current(
- additional_inputs, soma_voltage);
-
- // update neuron parameters
- state_t result = neuron_model_state_update(
- NUM_EXCITATORY_RECEPTORS, exc_input_values,
- NUM_INHIBITORY_RECEPTORS, inh_input_values,
- external_bias, this_neuron);
+static void neuron_impl_do_timestep_update(
+ uint32_t timer_count, UNUSED uint32_t time, uint32_t n_neurons) {
+
+ for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) {
+ // Get the neuron itself
+ neuron_t *this_neuron = &neuron_array[neuron_index];
+
+ // Get the input_type parameters and voltage for this neuron
+ input_type_t *input_types = &input_type_array[neuron_index];
+
+ // Get threshold and additional input parameters for this neuron
+ packet_firing_data_t *the_packet_firing =
+ &packet_firing_array[neuron_index];
+ additional_input_t *additional_inputs =
+ &additional_input_array[neuron_index];
+ synapse_param_t *the_synapse_type =
+ &neuron_synapse_shaping_params[neuron_index];
+
+ // Store whether the neuron has spiked
+ bool will_fire = false;
+
+ // Loop however many times requested; do this in reverse for efficiency,
+ // and because the index doesn't actually matter
+ for (uint32_t i = n_steps_per_timestep; i > 0; i--) {
+ // Get the voltage
+ state_t soma_voltage = neuron_model_get_membrane_voltage(this_neuron);
+
+ // Get the exc and inh values from the synapses
+ input_t exc_values[NUM_EXCITATORY_RECEPTORS];
+ input_t *exc_syn_values =
+ synapse_types_get_excitatory_input(exc_values, the_synapse_type);
+ input_t inh_values[NUM_INHIBITORY_RECEPTORS];
+ input_t *inh_syn_values =
+ synapse_types_get_inhibitory_input(inh_values, the_synapse_type);
+
+ // Call functions to obtain exc_input and inh_input
+ input_t *exc_input_values = input_type_get_input_value(
+ exc_syn_values, input_types, NUM_EXCITATORY_RECEPTORS);
+ input_t *inh_input_values = input_type_get_input_value(
+ inh_syn_values, input_types, NUM_INHIBITORY_RECEPTORS);
+
+ // Sum g_syn contributions from all receptors for recording
+ REAL total_exc = 0;
+ REAL total_inh = 0;
+
+ for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) {
+ total_exc += exc_input_values[i];
+ }
+ for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) {
+ total_inh += inh_input_values[i];
+ }
- // determine if a packet should fly
- will_fire = _test_will_fire(the_packet_firing);
+ // Do recording if on the first step
+ if (i == n_steps_per_timestep) {
+ neuron_recording_record_accum(
+ V_RECORDING_INDEX, neuron_index, soma_voltage);
+ neuron_recording_record_accum(
+ GSYN_EXC_RECORDING_INDEX, neuron_index, total_exc);
+ neuron_recording_record_accum(
+ GSYN_INH_RECORDING_INDEX, neuron_index, total_inh);
+ }
- // If spike occurs, communicate to relevant parts of model
- if (will_fire) {
- if (the_packet_firing->value_as_payload) {
- accum value_to_send = result;
- if (result > the_packet_firing->max_value) {
- value_to_send = the_packet_firing->max_value;
+ // Call functions to convert exc_input and inh_input to current
+ input_type_convert_excitatory_input_to_current(
+ exc_input_values, input_types, soma_voltage);
+ input_type_convert_inhibitory_input_to_current(
+ inh_input_values, input_types, soma_voltage);
+
+ uint32_t external_bias = additional_input_get_input_value_as_current(
+ additional_inputs, soma_voltage);
+
+ // update neuron parameters
+ state_t result = neuron_model_state_update(
+ NUM_EXCITATORY_RECEPTORS, exc_input_values,
+ NUM_INHIBITORY_RECEPTORS, inh_input_values,
+ external_bias, this_neuron);
+
+ // determine if a packet should fly
+ will_fire = _test_will_fire(the_packet_firing);
+
+ // If spike occurs, communicate to relevant parts of model
+ if (will_fire) {
+ if (the_packet_firing->value_as_payload) {
+ accum value_to_send = result;
+ if (result > the_packet_firing->max_value) {
+ value_to_send = the_packet_firing->max_value;
+ }
+ if (result < the_packet_firing->min_value) {
+ value_to_send = the_packet_firing->min_value;
+ }
+
+ uint payload = _get_payload(
+ the_packet_firing->type,
+ value_to_send * the_packet_firing->value_as_payload);
+
+ log_debug("Sending key=0x%08x payload=0x%08x",
+ the_packet_firing->key, payload);
+
+ tdma_processing_send_packet(
+ the_packet_firing->key, payload,
+ WITH_PAYLOAD, timer_count);
+ } else {
+ log_debug("Sending key=0x%08x", the_packet_firing->key);
+
+ tdma_processing_send_packet(
+ the_packet_firing->key, 0,
+ NO_PAYLOAD, timer_count);
}
- if (result < the_packet_firing->min_value) {
- value_to_send = the_packet_firing->min_value;
- }
-
- uint payload = _get_payload(
- the_packet_firing->type,
- value_to_send * the_packet_firing->value_as_payload);
-
- log_debug("Sending key=0x%08x payload=0x%08x",
- the_packet_firing->key, payload);
-
- tdma_processing_send_packet(
- the_packet_firing->key, payload,
- WITH_PAYLOAD, global_timer_count);
- } else {
- log_debug("Sending key=0x%08x", the_packet_firing->key);
-
- tdma_processing_send_packet(
- the_packet_firing->key, 0,
- NO_PAYLOAD, global_timer_count);
}
+
+ // Shape the existing input according to the included rule
+ synapse_types_shape_input(the_synapse_type);
}
- // Shape the existing input according to the included rule
- synapse_types_shape_input(the_synapse_type);
- }
+ if (will_fire) {
+ // Record the spike
+ neuron_recording_record_bit(PACKET_RECORDING_BITFIELD, neuron_index);
+ }
- if (will_fire) {
- // Record the spike
- neuron_recording_record_bit(PACKET_RECORDING_BITFIELD, neuron_index);
+ #if LOG_LEVEL >= LOG_DEBUG
+ neuron_model_print_state_variables(this_neuron);
+ #endif // LOG_LEVEL >= LOG_DEBUG
}
-
-#if LOG_LEVEL >= LOG_DEBUG
- neuron_model_print_state_variables(this_neuron);
-#endif // LOG_LEVEL >= LOG_DEBUG
-
- // Return the boolean to the model timestep update
- return false;
}
SOMETIMES_UNUSED // Marked unused as only used sometimes
diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h
index 78b8d7c9ab..5ed625694a 100644
--- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h
+++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h
@@ -245,115 +245,109 @@ static void neuron_impl_load_neuron_parameters(
}
SOMETIMES_UNUSED // Marked unused as only used sometimes
-//! \brief Do the timestep update for the particular implementation
-//! \param[in] neuron_index: The index of the neuron to update
-//! \param[in] external_bias: External input to be applied to the neuron
-//! \return True if a spike has occurred
-static bool neuron_impl_do_timestep_update(index_t neuron_index,
- input_t external_bias) {
- // Get the neuron itself
- neuron_t *this_neuron = &neuron_array[neuron_index];
-
- // Get the input_type parameters and voltage for this neuron
- input_type_t *input_types = &input_type_array[neuron_index];
-
- // Get threshold and additional input parameters for this neuron
- threshold_type_t *the_threshold_type = &threshold_type_array[neuron_index];
- additional_input_t *additional_inputs =
- &additional_input_array[neuron_index];
- synapse_param_t *the_synapse_type =
- &neuron_synapse_shaping_params[neuron_index];
-
- // Store whether the neuron has spiked
- bool has_spiked = false;
-
- // Loop however many times requested; do this in reverse for efficiency,
- // and because the index doesn't actually matter
- for (uint32_t i_step = n_steps_per_timestep; i_step > 0; i_step--) {
- // Get the voltage
- state_t soma_voltage = neuron_model_get_membrane_voltage(this_neuron);
-
- // Get the exc and inh values from the synapses
- input_t exc_values[NUM_EXCITATORY_RECEPTORS];
- input_t *exc_syn_values =
- synapse_types_get_excitatory_input(exc_values, the_synapse_type);
- input_t inh_values[NUM_INHIBITORY_RECEPTORS];
- input_t *inh_syn_values =
- synapse_types_get_inhibitory_input(inh_values, the_synapse_type);
-
- // Call functions to obtain exc_input and inh_input
- input_t *exc_input_values = input_type_get_input_value(
- exc_syn_values, input_types, NUM_EXCITATORY_RECEPTORS);
- input_t *inh_input_values = input_type_get_input_value(
- inh_syn_values, input_types, NUM_INHIBITORY_RECEPTORS);
-
- // Sum g_syn contributions from all receptors for recording
- REAL total_exc = 0;
- REAL total_inh = 0;
-
- for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) {
- total_exc += exc_input_values[i];
- }
- for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) {
- total_inh += inh_input_values[i];
- }
+static void neuron_impl_do_timestep_update(
+ uint32_t timer_count, uint32_t time, uint32_t n_neurons) {
+
+ for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) {
+
+ // Get the neuron itself
+ neuron_t *this_neuron = &neuron_array[neuron_index];
+
+ // Get the input_type parameters and voltage for this neuron
+ input_type_t *input_types = &input_type_array[neuron_index];
+
+ // Get threshold and additional input parameters for this neuron
+ threshold_type_t *the_threshold_type = &threshold_type_array[neuron_index];
+ additional_input_t *additional_inputs =
+ &additional_input_array[neuron_index];
+ synapse_param_t *the_synapse_type =
+ &neuron_synapse_shaping_params[neuron_index];
+
+ // Loop however many times requested; do this in reverse for efficiency,
+ // and because the index doesn't actually matter
+ for (uint32_t i_step = n_steps_per_timestep; i_step > 0; i_step--) {
+ // Get the voltage
+ state_t soma_voltage = neuron_model_get_membrane_voltage(this_neuron);
+
+ // Get the exc and inh values from the synapses
+ input_t exc_values[NUM_EXCITATORY_RECEPTORS];
+ input_t *exc_syn_values =
+ synapse_types_get_excitatory_input(exc_values, the_synapse_type);
+ input_t inh_values[NUM_INHIBITORY_RECEPTORS];
+ input_t *inh_syn_values =
+ synapse_types_get_inhibitory_input(inh_values, the_synapse_type);
+
+ // Call functions to obtain exc_input and inh_input
+ input_t *exc_input_values = input_type_get_input_value(
+ exc_syn_values, input_types, NUM_EXCITATORY_RECEPTORS);
+ input_t *inh_input_values = input_type_get_input_value(
+ inh_syn_values, input_types, NUM_INHIBITORY_RECEPTORS);
+
+ // Sum g_syn contributions from all receptors for recording
+ REAL total_exc = 0;
+ REAL total_inh = 0;
+
+ for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) {
+ total_exc += exc_input_values[i];
+ }
+ for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) {
+ total_inh += inh_input_values[i];
+ }
- // Do recording if on the first step
- if (i_step == n_steps_per_timestep) {
- neuron_recording_record_accum(
- V_RECORDING_INDEX, neuron_index, soma_voltage);
- neuron_recording_record_accum(
- GSYN_EXC_RECORDING_INDEX, neuron_index, total_exc);
- neuron_recording_record_accum(
- GSYN_INH_RECORDING_INDEX, neuron_index, total_inh);
- }
+ // Do recording if on the first step
+ if (i_step == n_steps_per_timestep) {
+ neuron_recording_record_accum(
+ V_RECORDING_INDEX, neuron_index, soma_voltage);
+ neuron_recording_record_accum(
+ GSYN_EXC_RECORDING_INDEX, neuron_index, total_exc);
+ neuron_recording_record_accum(
+ GSYN_INH_RECORDING_INDEX, neuron_index, total_inh);
+ }
- // Call functions to convert exc_input and inh_input to current
- input_type_convert_excitatory_input_to_current(
- exc_input_values, input_types, soma_voltage);
- input_type_convert_inhibitory_input_to_current(
- inh_input_values, input_types, soma_voltage);
+ // Call functions to convert exc_input and inh_input to current
+ input_type_convert_excitatory_input_to_current(
+ exc_input_values, input_types, soma_voltage);
+ input_type_convert_inhibitory_input_to_current(
+ inh_input_values, input_types, soma_voltage);
- external_bias += additional_input_get_input_value_as_current(
- additional_inputs, soma_voltage);
+ input_t external_bias = additional_input_get_input_value_as_current(
+ additional_inputs, soma_voltage);
- // update neuron parameters
- state_t result = neuron_model_state_update(
- NUM_EXCITATORY_RECEPTORS, exc_input_values,
- NUM_INHIBITORY_RECEPTORS, inh_input_values,
- external_bias, this_neuron);
+ // update neuron parameters
+ state_t result = neuron_model_state_update(
+ NUM_EXCITATORY_RECEPTORS, exc_input_values,
+ NUM_INHIBITORY_RECEPTORS, inh_input_values,
+ external_bias, this_neuron);
- // determine if a spike should occur
- bool spike_now =
- threshold_type_is_above_threshold(result, the_threshold_type);
+ // determine if a spike should occur
+ bool spike_now =
+ threshold_type_is_above_threshold(result, the_threshold_type);
- // If spike occurs, communicate to relevant parts of model
- if (spike_now) {
- has_spiked = true;
+ // If spike occurs, communicate to relevant parts of model
+ if (spike_now) {
- // Call relevant model-based functions
- // Tell the neuron model
- neuron_model_has_spiked(this_neuron);
+ // Call relevant model-based functions
+ // Tell the neuron model
+ neuron_model_has_spiked(this_neuron);
- // Tell the additional input
- additional_input_has_spiked(additional_inputs);
- }
+ // Tell the additional input
+ additional_input_has_spiked(additional_inputs);
- // Shape the existing input according to the included rule
- synapse_types_shape_input(the_synapse_type);
- }
+ // Record the spike
+ neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index);
- if (has_spiked) {
- // Record the spike
- neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index);
- }
+ // Send the spike
+ send_spike(timer_count, time, neuron_index);
+ }
-#if LOG_LEVEL >= LOG_DEBUG
- neuron_model_print_state_variables(this_neuron);
-#endif // LOG_LEVEL >= LOG_DEBUG
+ // Shape the existing input according to the included rule
+ synapse_types_shape_input(the_synapse_type);
+ }
- // Return the boolean to the model timestep update
- return has_spiked;
+ #if LOG_LEVEL >= LOG_DEBUG
+ neuron_model_print_state_variables(this_neuron);
+ #endif // LOG_LEVEL >= LOG_DEBUG
+ }
}
SOMETIMES_UNUSED // Marked unused as only used sometimes
diff --git a/neural_modelling/src/neuron/models/neuron_model.h b/neural_modelling/src/neuron/models/neuron_model.h
index 299dc81f5e..66dad82f47 100644
--- a/neural_modelling/src/neuron/models/neuron_model.h
+++ b/neural_modelling/src/neuron/models/neuron_model.h
@@ -39,11 +39,17 @@ typedef struct global_neuron_params_t global_neuron_params_t;
//! pointer to global neuron parameters
typedef global_neuron_params_t* global_neuron_params_pointer_t;
+#ifndef SOMETIMES_UNUSED
+#define SOMETIMES_UNUSED __attribute__((unused))
+#endif // !SOMETIMES_UNUSED
+
+SOMETIMES_UNUSED // Marked unused as only used sometimes
//! \brief set the global neuron parameters
//! \param[in] params: The parameters to set
void neuron_model_set_global_neuron_params(
const global_neuron_params_t *params);
+SOMETIMES_UNUSED // Marked unused as only used sometimes
//! \brief primary function called in timer loop after synaptic updates
//! \param[in] num_excitatory_inputs: Number of excitatory receptor types.
//! \param[in] exc_input: Pointer to array of inputs per receptor type received
@@ -59,22 +65,24 @@ void neuron_model_set_global_neuron_params(
//! contains all the parameters for a specific neuron
//! \return the value to be compared with a threshold value to determine if the
//! neuron has spiked
-state_t neuron_model_state_update(
+static state_t neuron_model_state_update(
uint16_t num_excitatory_inputs, const input_t *exc_input,
uint16_t num_inhibitory_inputs, const input_t *inh_input,
input_t external_bias, neuron_t *restrict neuron);
+SOMETIMES_UNUSED // Marked unused as only used sometimes
//! \brief Indicates that the neuron has spiked
//! \param[in, out] neuron pointer to a neuron parameter struct which contains
//! all the parameters for a specific neuron
-void neuron_model_has_spiked(neuron_t *restrict neuron);
+static void neuron_model_has_spiked(neuron_t *restrict neuron);
+SOMETIMES_UNUSED // Marked unused as only used sometimes
//! \brief get the neuron membrane voltage for a given neuron parameter set
//! \param[in] neuron: a pointer to a neuron parameter struct which contains
//! all the parameters for a specific neuron
//! \return the membrane voltage for a given neuron with the neuron
//! parameters specified in neuron
-state_t neuron_model_get_membrane_voltage(const neuron_t *neuron);
+static state_t neuron_model_get_membrane_voltage(const neuron_t *neuron);
//! \brief printout of state variables i.e. those values that might change
//! \param[in] neuron: a pointer to a neuron parameter struct which contains all
diff --git a/neural_modelling/src/neuron/models/neuron_model_izh_impl.c b/neural_modelling/src/neuron/models/neuron_model_izh_impl.c
index 480d023325..043873b91b 100644
--- a/neural_modelling/src/neuron/models/neuron_model_izh_impl.c
+++ b/neural_modelling/src/neuron/models/neuron_model_izh_impl.c
@@ -22,116 +22,13 @@
#include
//! The global parameters of the Izhekevich neuron model
-static const global_neuron_params_t *global_params;
-
-/*! \brief For linear membrane voltages, 1.5 is the correct value. However
- * with actual membrane voltage behaviour and tested over an wide range of
- * use cases 1.85 gives slightly better spike timings.
- */
-static const REAL SIMPLE_TQ_OFFSET = REAL_CONST(1.85);
-
-/////////////////////////////////////////////////////////////
-#if 0
-// definition for Izhikevich neuron
-static inline void neuron_ode(
- REAL t, REAL stateVar[], REAL dstateVar_dt[],
- neuron_t *neuron, REAL input_this_timestep) {
- REAL V_now = stateVar[1];
- REAL U_now = stateVar[2];
- log_debug(" sv1 %9.4k V %9.4k --- sv2 %9.4k U %9.4k\n", stateVar[1],
- neuron->V, stateVar[2], neuron->U);
-
- // Update V
- dstateVar_dt[1] =
- REAL_CONST(140.0)
- + (REAL_CONST(5.0) + REAL_CONST(0.0400) * V_now) * V_now - U_now
- + input_this_timestep;
-
- // Update U
- dstateVar_dt[2] = neuron->A * (neuron->B * V_now - U_now);
-}
-#endif
-
-//! \brief The original model uses 0.04, but this (1 ULP larger?) gives better
-//! numeric stability.
-//!
-//! Thanks to Mantas Mikaitis for this!
-static const REAL MAGIC_MULTIPLIER = REAL_CONST(0.040008544921875);
-
-/*!
- * \brief Midpoint is best balance between speed and accuracy so far.
- * \details From ODE solver comparison work, paper shows that Trapezoid version
- * gives better accuracy at small speed cost
- * \param[in] h: threshold
- * \param[in,out] neuron: The model being updated
- * \param[in] input_this_timestep: the input
- */
-static inline void rk2_kernel_midpoint(
- REAL h, neuron_t *neuron, REAL input_this_timestep) {
- // to match Mathematica names
- REAL lastV1 = neuron->V;
- REAL lastU1 = neuron->U;
- REAL a = neuron->A;
- REAL b = neuron->B;
-
- REAL pre_alph = REAL_CONST(140.0) + input_this_timestep - lastU1;
- REAL alpha = pre_alph
- + (REAL_CONST(5.0) + MAGIC_MULTIPLIER * lastV1) * lastV1;
- REAL eta = lastV1 + REAL_HALF(h * alpha);
-
- // could be represented as a long fract?
- REAL beta = REAL_HALF(h * (b * lastV1 - lastU1) * a);
-
- neuron->V += h * (pre_alph - beta
- + (REAL_CONST(5.0) + MAGIC_MULTIPLIER * eta) * eta);
-
- neuron->U += a * h * (-lastU1 - beta + b * eta);
-}
+const global_neuron_params_t *global_params;
void neuron_model_set_global_neuron_params(
const global_neuron_params_t *params) {
global_params = params;
}
-state_t neuron_model_state_update(
- uint16_t num_excitatory_inputs, const input_t *exc_input,
- uint16_t num_inhibitory_inputs, const input_t *inh_input,
- input_t external_bias, neuron_t *restrict neuron) {
- REAL total_exc = 0;
- REAL total_inh = 0;
-
- for (int i =0; iI_offset;
-
- // the best AR update so far
- rk2_kernel_midpoint(neuron->this_h, neuron, input_this_timestep);
- neuron->this_h = global_params->machine_timestep_ms;
-
- return neuron->V;
-}
-
-void neuron_model_has_spiked(neuron_t *restrict neuron) {
- // reset membrane voltage
- neuron->V = neuron->C;
-
- // offset 2nd state variable
- neuron->U += neuron->D;
-
- // simple threshold correction - next timestep (only) gets a bump
- neuron->this_h = global_params->machine_timestep_ms * SIMPLE_TQ_OFFSET;
-}
-
-state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) {
- return neuron->V;
-}
-
void neuron_model_print_state_variables(const neuron_t *neuron) {
log_debug("V = %11.4k ", neuron->V);
log_debug("U = %11.4k ", neuron->U);
diff --git a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h
index a18dea55f5..2a840d9623 100644
--- a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h
+++ b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h
@@ -46,4 +46,110 @@ typedef struct global_neuron_params_t {
REAL machine_timestep_ms;
} global_neuron_params_t;
+extern const global_neuron_params_t *global_params;
+
+/*! \brief For linear membrane voltages, 1.5 is the correct value. However
+ * with actual membrane voltage behaviour and tested over an wide range of
+ * use cases 1.85 gives slightly better spike timings.
+ */
+static const REAL SIMPLE_TQ_OFFSET = REAL_CONST(1.85);
+
+/////////////////////////////////////////////////////////////
+#if 0
+// definition for Izhikevich neuron
+static inline void neuron_ode(
+ REAL t, REAL stateVar[], REAL dstateVar_dt[],
+ neuron_t *neuron, REAL input_this_timestep) {
+ REAL V_now = stateVar[1];
+ REAL U_now = stateVar[2];
+ log_debug(" sv1 %9.4k V %9.4k --- sv2 %9.4k U %9.4k\n", stateVar[1],
+ neuron->V, stateVar[2], neuron->U);
+
+ // Update V
+ dstateVar_dt[1] =
+ REAL_CONST(140.0)
+ + (REAL_CONST(5.0) + REAL_CONST(0.0400) * V_now) * V_now - U_now
+ + input_this_timestep;
+
+ // Update U
+ dstateVar_dt[2] = neuron->A * (neuron->B * V_now - U_now);
+}
+#endif
+
+//! \brief The original model uses 0.04, but this (1 ULP larger?) gives better
+//! numeric stability.
+//!
+//! Thanks to Mantas Mikaitis for this!
+static const REAL MAGIC_MULTIPLIER = REAL_CONST(0.040008544921875);
+
+/*!
+ * \brief Midpoint is best balance between speed and accuracy so far.
+ * \details From ODE solver comparison work, paper shows that Trapezoid version
+ * gives better accuracy at small speed cost
+ * \param[in] h: threshold
+ * \param[in,out] neuron: The model being updated
+ * \param[in] input_this_timestep: the input
+ */
+static inline void rk2_kernel_midpoint(
+ REAL h, neuron_t *neuron, REAL input_this_timestep) {
+ // to match Mathematica names
+ REAL lastV1 = neuron->V;
+ REAL lastU1 = neuron->U;
+ REAL a = neuron->A;
+ REAL b = neuron->B;
+
+ REAL pre_alph = REAL_CONST(140.0) + input_this_timestep - lastU1;
+ REAL alpha = pre_alph
+ + (REAL_CONST(5.0) + MAGIC_MULTIPLIER * lastV1) * lastV1;
+ REAL eta = lastV1 + REAL_HALF(h * alpha);
+
+ // could be represented as a long fract?
+ REAL beta = REAL_HALF(h * (b * lastV1 - lastU1) * a);
+
+ neuron->V += h * (pre_alph - beta
+ + (REAL_CONST(5.0) + MAGIC_MULTIPLIER * eta) * eta);
+
+ neuron->U += a * h * (-lastU1 - beta + b * eta);
+}
+
+
+static state_t neuron_model_state_update(
+ uint16_t num_excitatory_inputs, const input_t *exc_input,
+ uint16_t num_inhibitory_inputs, const input_t *inh_input,
+ input_t external_bias, neuron_t *restrict neuron) {
+ REAL total_exc = 0;
+ REAL total_inh = 0;
+
+ for (int i =0; iI_offset;
+
+ // the best AR update so far
+ rk2_kernel_midpoint(neuron->this_h, neuron, input_this_timestep);
+ neuron->this_h = global_params->machine_timestep_ms;
+
+ return neuron->V;
+}
+
+static void neuron_model_has_spiked(neuron_t *restrict neuron) {
+ // reset membrane voltage
+ neuron->V = neuron->C;
+
+ // offset 2nd state variable
+ neuron->U += neuron->D;
+
+ // simple threshold correction - next timestep (only) gets a bump
+ neuron->this_h = global_params->machine_timestep_ms * SIMPLE_TQ_OFFSET;
+}
+
+static state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) {
+ return neuron->V;
+}
+
#endif // _NEURON_MODEL_IZH_CURR_IMPL_H_
diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c
index c6345b3a65..cc048303c8 100644
--- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c
+++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c
@@ -21,66 +21,11 @@
#include
-//! \brief simple Leaky I&F ODE
-//! \param[in,out] neuron: The neuron to update
-//! \param[in] V_prev: previous voltage
-//! \param[in] input_this_timestep: The input to apply
-static inline void lif_neuron_closed_form(
- neuron_t *neuron, REAL V_prev, input_t input_this_timestep) {
- REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest;
-
- // update membrane voltage
- neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev));
-}
-
void neuron_model_set_global_neuron_params(
UNUSED const global_neuron_params_t *params) {
// Does Nothing - no params
}
-state_t neuron_model_state_update(
- uint16_t num_excitatory_inputs, const input_t *exc_input,
- uint16_t num_inhibitory_inputs, const input_t *inh_input,
- input_t external_bias, neuron_t *restrict neuron) {
- log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]);
- log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]);
-
- // If outside of the refractory period
- if (neuron->refract_timer <= 0) {
- REAL total_exc = 0;
- REAL total_inh = 0;
-
- for (int i=0; i < num_excitatory_inputs; i++) {
- total_exc += exc_input[i];
- }
- for (int i=0; i< num_inhibitory_inputs; i++) {
- total_inh += inh_input[i];
- }
- // Get the input in nA
- input_t input_this_timestep =
- total_exc - total_inh + external_bias + neuron->I_offset;
-
- lif_neuron_closed_form(
- neuron, neuron->V_membrane, input_this_timestep);
- } else {
- // countdown refractory timer
- neuron->refract_timer--;
- }
- return neuron->V_membrane;
-}
-
-void neuron_model_has_spiked(neuron_t *restrict neuron) {
- // reset membrane voltage
- neuron->V_membrane = neuron->V_reset;
-
- // reset refractory timer
- neuron->refract_timer = neuron->T_refract;
-}
-
-state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) {
- return neuron->V_membrane;
-}
-
void neuron_model_print_state_variables(const neuron_t *neuron) {
log_debug("V membrane = %11.4k mv", neuron->V_membrane);
}
diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h
index 1913497937..b3145c4701 100644
--- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h
+++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h
@@ -56,4 +56,57 @@ typedef struct neuron_t {
typedef struct global_neuron_params_t {
} global_neuron_params_t;
+//! \brief simple Leaky I&F ODE
+//! \param[in,out] neuron: The neuron to update
+//! \param[in] V_prev: previous voltage
+//! \param[in] input_this_timestep: The input to apply
+static inline void lif_neuron_closed_form(
+ neuron_t *neuron, REAL V_prev, input_t input_this_timestep) {
+ REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest;
+
+ // update membrane voltage
+ neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev));
+}
+
+static state_t neuron_model_state_update(
+ uint16_t num_excitatory_inputs, const input_t *exc_input,
+ uint16_t num_inhibitory_inputs, const input_t *inh_input,
+ input_t external_bias, neuron_t *restrict neuron) {
+
+ // If outside of the refractory period
+ if (neuron->refract_timer <= 0) {
+ REAL total_exc = 0;
+ REAL total_inh = 0;
+
+ for (int i=0; i < num_excitatory_inputs; i++) {
+ total_exc += exc_input[i];
+ }
+ for (int i=0; i< num_inhibitory_inputs; i++) {
+ total_inh += inh_input[i];
+ }
+ // Get the input in nA
+ input_t input_this_timestep =
+ total_exc - total_inh + external_bias + neuron->I_offset;
+
+ lif_neuron_closed_form(
+ neuron, neuron->V_membrane, input_this_timestep);
+ } else {
+ // countdown refractory timer
+ neuron->refract_timer--;
+ }
+ return neuron->V_membrane;
+}
+
+static void neuron_model_has_spiked(neuron_t *restrict neuron) {
+ // reset membrane voltage
+ neuron->V_membrane = neuron->V_reset;
+
+ // reset refractory timer
+ neuron->refract_timer = neuron->T_refract;
+}
+
+static state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) {
+ return neuron->V_membrane;
+}
+
#endif // _NEURON_MODEL_LIF_CURR_IMPL_H_
diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c
index da50b69bb9..faf75d9df5 100644
--- a/neural_modelling/src/neuron/neuron.c
+++ b/neural_modelling/src/neuron/neuron.c
@@ -23,62 +23,74 @@
#include "neuron_recording.h"
#include "implementations/neuron_impl.h"
#include "plasticity/synapse_dynamics.h"
+#include "tdma_processing.h"
#include
-#include
//! The key to be used for this core (will be ORed with neuron ID)
-static key_t key;
+key_t key;
//! A checker that says if this model should be transmitting. If set to false
//! by the data region, then this model should not have a key.
-static bool use_key;
+bool use_key;
+
+//! Latest time in a timestep that any neuron has sent a spike
+uint32_t latest_send_time = 0xFFFFFFFF;
+
+//! Earliest time in a timestep that any neuron has sent a spike
+uint32_t earliest_send_time = 0;
//! The number of neurons on the core
static uint32_t n_neurons;
-//! The recording flags
-static uint32_t recording_flags = 0;
+//! The closest power of 2 >= n_neurons
+static uint32_t n_neurons_peak;
+
+//! The number of synapse types
+static uint32_t n_synapse_types;
+
+//! Amount to left shift the ring buffer by to make it an input
+static uint32_t *ring_buffer_to_input_left_shifts;
+
+//! The address where the actual neuron parameters start
+static address_t saved_params_address;
//! parameters that reside in the neuron_parameter_data_region
struct neuron_parameters {
uint32_t has_key;
uint32_t transmission_key;
uint32_t n_neurons_to_simulate;
+ uint32_t n_neurons_peak;
uint32_t n_synapse_types;
- uint32_t incoming_spike_buffer_size;
+ uint32_t ring_buffer_shifts[];
};
-//! Offset of start of global parameters, in words.
-#define START_OF_GLOBAL_PARAMETERS \
- ((sizeof(struct neuron_parameters) + \
- sizeof(struct tdma_parameters)) / sizeof(uint32_t))
-
//! \brief does the memory copy for the neuron parameters
//! \param[in] address: the address where the neuron parameters are stored
//! in SDRAM
//! \return bool which is true if the mem copy's worked, false otherwise
-static bool neuron_load_neuron_parameters(address_t address) {
+static bool neuron_load_neuron_parameters(void) {
log_debug("loading parameters");
// call the neuron implementation functions to do the work
- neuron_impl_load_neuron_parameters(
- address, START_OF_GLOBAL_PARAMETERS, n_neurons);
+ // Note the "next" is 0 here because we are using a saved address
+ // which has already accounted for the position of the data within
+ // the region being read.
+ neuron_impl_load_neuron_parameters(saved_params_address, 0, n_neurons);
return true;
}
-bool neuron_resume(address_t address) { // EXPORTED
+bool neuron_resume(void) { // EXPORTED
if (!neuron_recording_reset(n_neurons)){
log_error("failed to reload the neuron recording parameters");
return false;
}
log_debug("neuron_reloading_neuron_parameters: starting");
- return neuron_load_neuron_parameters(address);
+ return neuron_load_neuron_parameters();
}
bool neuron_initialise(
address_t address, address_t recording_address, // EXPORTED
- uint32_t *n_neurons_value, uint32_t *n_synapse_types_value,
- uint32_t *incoming_spike_buffer_size, uint32_t *n_rec_regions_used) {
+ uint32_t *n_rec_regions_used) {
log_debug("neuron_initialise: starting");
// init the TDMA
@@ -103,14 +115,26 @@ bool neuron_initialise(
// Read the neuron details
n_neurons = params->n_neurons_to_simulate;
- *n_neurons_value = n_neurons;
- *n_synapse_types_value = params->n_synapse_types;
+ n_neurons_peak = params->n_neurons_peak;
+ n_synapse_types = params->n_synapse_types;
+
+ // Set up ring buffer left shifts
+ uint32_t ring_buffer_bytes = n_synapse_types * sizeof(uint32_t);
+ ring_buffer_to_input_left_shifts = spin1_malloc(ring_buffer_bytes);
+ if (ring_buffer_to_input_left_shifts == NULL) {
+ log_error("Not enough memory to allocate ring buffer");
+ return false;
+ }
+
+ // read in ring buffer to input left shifts
+ spin1_memcpy(
+ ring_buffer_to_input_left_shifts, params->ring_buffer_shifts,
+ ring_buffer_bytes);
- // Read the size of the incoming spike buffer to use
- *incoming_spike_buffer_size = params->incoming_spike_buffer_size;
+ // Store where the actual neuron parameters start
+ saved_params_address = ¶ms->ring_buffer_shifts[n_synapse_types];
- log_debug("\t n_neurons = %u, spike buffer size = %u", n_neurons,
- *incoming_spike_buffer_size);
+ log_info("\t n_neurons = %u, peak %u", n_neurons, n_neurons_peak);
// Call the neuron implementation initialise function to setup DTCM etc.
if (!neuron_impl_initialise(n_neurons)) {
@@ -118,85 +142,64 @@ bool neuron_initialise(
}
// load the data into the allocated DTCM spaces.
- if (!neuron_load_neuron_parameters(address)) {
+ if (!neuron_load_neuron_parameters()) {
return false;
}
// setup recording region
if (!neuron_recording_initialise(
- recording_address, &recording_flags, n_neurons, n_rec_regions_used)) {
+ recording_address, n_neurons, n_rec_regions_used)) {
return false;
}
return true;
}
-void neuron_pause(address_t address) { // EXPORTED
- /* Finalise any recordings that are in progress, writing back the final
- * amounts of samples recorded to SDRAM */
- if (recording_flags > 0) {
- log_debug("updating recording regions");
- neuron_recording_finalise();
- }
+void neuron_pause(void) { // EXPORTED
// call neuron implementation function to do the work
- neuron_impl_store_neuron_parameters(
- address, START_OF_GLOBAL_PARAMETERS, n_neurons);
+ neuron_impl_store_neuron_parameters(saved_params_address, 0, n_neurons);
}
void neuron_do_timestep_update(timer_t time, uint timer_count) { // EXPORTED
// the phase in this timer tick im in (not tied to neuron index)
- tdma_processing_reset_phase();
+ // tdma_processing_reset_phase();
// Prepare recording for the next timestep
neuron_recording_setup_for_next_recording();
- // update each neuron individually
- for (index_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) {
-
- // Get external bias from any source of intrinsic plasticity
- input_t external_bias =
- synapse_dynamics_get_intrinsic_bias(time, neuron_index);
-
- // call the implementation function (boolean for spike)
- bool spike = neuron_impl_do_timestep_update(
- neuron_index, external_bias);
-
- // If the neuron has spiked
- if (spike) {
- log_debug("neuron %u spiked at time %u", neuron_index, time);
-
- // Do any required synapse processing
- synapse_dynamics_process_post_synaptic_event(time, neuron_index);
-
- if (use_key) {
- tdma_processing_send_packet(
- (key | neuron_index), 0, NO_PAYLOAD, timer_count);
- }
- } else {
- log_debug("the neuron %d has been determined to not spike",
- neuron_index);
- }
- }
+ neuron_impl_do_timestep_update(timer_count, time, n_neurons);
log_debug("time left of the timer after tdma is %d", tc[T1_COUNT]);
- // Disable interrupts to avoid possible concurrent access
- uint cpsr = spin1_int_disable();
-
// Record the recorded variables
neuron_recording_record(time);
-
- // Re-enable interrupts
- spin1_mode_restore(cpsr);
}
-void neuron_add_inputs( // EXPORTED
- index_t synapse_type_index, index_t neuron_index,
- input_t weights_this_timestep) {
- neuron_impl_add_inputs(
- synapse_type_index, neuron_index, weights_this_timestep);
+void neuron_transfer(weight_t *syns) { // EXPORTED
+ uint32_t synapse_index = 0;
+ uint32_t ring_buffer_index = 0;
+ for (uint32_t s_i = n_synapse_types; s_i > 0; s_i--) {
+ uint32_t rb_shift = ring_buffer_to_input_left_shifts[synapse_index];
+ uint32_t neuron_index = 0;
+ for (uint32_t n_i = n_neurons_peak; n_i > 0; n_i--) {
+ weight_t value = syns[ring_buffer_index];
+ if (value > 0) {
+ if (neuron_index > n_neurons) {
+ log_error("Neuron index %u out of range", neuron_index);
+ rt_error(RTE_SWERR);
+ }
+ input_t val_to_add = synapse_row_convert_weight_to_input(
+ value, rb_shift);
+ neuron_impl_add_inputs(synapse_index, neuron_index, val_to_add);
+ }
+ syns[ring_buffer_index] = 0;
+ ring_buffer_index++;
+ neuron_index++;
+ }
+ synapse_index++;
+ }
}
#if LOG_LEVEL >= LOG_DEBUG
diff --git a/neural_modelling/src/neuron/neuron.h b/neural_modelling/src/neuron/neuron.h
index e140904e4c..dcb961202f 100644
--- a/neural_modelling/src/neuron/neuron.h
+++ b/neural_modelling/src/neuron/neuron.h
@@ -32,6 +32,7 @@
#ifndef _NEURON_H_
#define _NEURON_H_
+#include "synapse_row.h"
#include
#include
@@ -41,17 +42,10 @@
//! NEURON_PARAMS data region in SDRAM
//! \param[in] recording_address: the recording parameters in SDRAM
//! (contains which regions are active and how big they are)
-//! \param[out] n_neurons_value: The number of neurons this model is to
-//! simulate
-//! \param[out] n_synapse_types_value: The number of synapse types in
-//! the model
-//! \param[out] incoming_spike_buffer_size: The number of spikes to
-//! support in the incoming spike circular buffer
//! \param[out] n_rec_regions_used: The number of regions used by neuron recording
//! \return True if the translation was successful, otherwise False
bool neuron_initialise(
- address_t address, address_t recording_address, uint32_t *n_neurons_value,
- uint32_t *n_synapse_types_value, uint32_t *incoming_spike_buffer_size,
+ address_t address, address_t recording_address,
uint32_t *n_rec_regions_used);
//! \brief executes all the updates to neural parameters when a given timer
@@ -61,24 +55,17 @@ bool neuron_initialise(
void neuron_do_timestep_update(timer_t time, uint timer_count);
//! \brief Prepare to resume simulation of the neurons
-//! \param[in] address: the address where the neuron parameters are stored
-//! in SDRAM
//! \return bool which is true if the resume was successful or not
-bool neuron_resume(address_t address);
+bool neuron_resume(void);
//! \brief Perform steps needed before pausing a simulation.
//! \details Stores neuron parameters back into SDRAM.
-//! \param[in] address: the address where the neuron parameters are stored
-//! in SDRAM
-void neuron_pause(address_t address);
+void neuron_pause(void);
-//! \brief Add inputs to the neuron
-//! \param[in] synapse_type_index the synapse type (e.g. exc. or inh.)
-//! \param[in] neuron_index the index of the neuron
-//! \param[in] weights_this_timestep weight inputs to be added
-void neuron_add_inputs(
- index_t synapse_type_index, index_t neuron_index,
- input_t weights_this_timestep);
+//! \brief Add inputs to the neurons
+//! \param[in] syns The inputs to be added; this is an array of size
+//! n_synapse_types * 2^ceil(log_2(n_neurons)).
+void neuron_transfer(weight_t *syns);
#if LOG_LEVEL >= LOG_DEBUG
//! \brief Print the inputs to the neurons.
diff --git a/neural_modelling/src/neuron/neuron_recording.c b/neural_modelling/src/neuron/neuron_recording.c
index f5a8d6c089..011f683e0e 100644
--- a/neural_modelling/src/neuron/neuron_recording.c
+++ b/neural_modelling/src/neuron/neuron_recording.c
@@ -89,11 +89,6 @@ static void reset_record_counter(void) {
}
}
-//! \brief wrapper to recording finalise
-void neuron_recording_finalise(void) {
- recording_finalise();
-}
-
//! \brief the number of bytes used in bitfield recording for n_neurons
//! \param[in] n_neurons: The number of neurons to create a bitfield for
//! \return the size of the bitfield data structure for the number of neurons
@@ -185,7 +180,6 @@ static bool neuron_recording_read_in_elements(
}
bool neuron_recording_reset(uint32_t n_neurons) {
- recording_reset();
if (!neuron_recording_read_in_elements(reset_address, n_neurons)) {
log_error("failed to reread in the new elements after reset");
return false;
@@ -283,15 +277,10 @@ typedef struct neuron_recording_header {
} neuron_recording_header_t;
bool neuron_recording_initialise(
- void *recording_address, uint32_t *recording_flags,
- uint32_t n_neurons, uint32_t *n_rec_regions_used) {
+ void *recording_address, uint32_t n_neurons,
+ uint32_t *n_rec_regions_used) {
// boot up the basic recording
void *data_addr = recording_address;
- bool success = recording_initialize(&data_addr, recording_flags);
- if (!success) {
- log_error("failed to init basic recording.");
- return false;
- }
// Verify the number of recording and bitfield elements
neuron_recording_header_t *header = data_addr;
diff --git a/neural_modelling/src/neuron/neuron_recording.h b/neural_modelling/src/neuron/neuron_recording.h
index c529734c7f..0b6743e4af 100644
--- a/neural_modelling/src/neuron/neuron_recording.h
+++ b/neural_modelling/src/neuron/neuron_recording.h
@@ -209,17 +209,12 @@ bool neuron_recording_reset(uint32_t n_neurons);
//! \brief sets up the recording stuff
//! \param[in] recording_address: sdram location for the recording data
-//! \param[out] recording_flags: Output of flags which can be used to check if
-//! a channel is enabled for recording
//! \param[in] n_neurons: the number of neurons to setup for
//! \param[out] n_rec_regions_used: Output the number of regions used by neuron
//! recording
//! \return bool stating if the init was successful or not
bool neuron_recording_initialise(
- void *recording_address, uint32_t *recording_flags,
- uint32_t n_neurons, uint32_t *n_rec_regions_used);
-
-//! \brief finishes recording
-void neuron_recording_finalise(void);
+ void *recording_address, uint32_t n_neurons,
+ uint32_t *n_rec_regions_used);
#endif //_NEURON_RECORDING_H_
diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
index e3f3c74430..9c89d03db4 100644
--- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
+++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
@@ -36,19 +36,6 @@
#include
#include
-//! ::synapse_index_bits + number of synapse type bits
-static uint32_t synapse_type_index_bits;
-//! Number of bits to hold the neuron index
-static uint32_t synapse_index_bits;
-//! Mask to extract the neuron index (has ::synapse_index_bits bits set)
-static uint32_t synapse_index_mask;
-//! Mask to extract the type and index (has ::synapse_type_index_bits bits set)
-static uint32_t synapse_type_index_mask;
-//! ::synapse_delay_index_type_bits + number of bits to encode delay
-static uint32_t synapse_delay_index_type_bits;
-//! Mask to extract the synapse type
-static uint32_t synapse_type_mask;
-
//! The type of configuration parameters in SDRAM (written by host)
typedef struct stdp_params {
//! The back-propagation delay, in basic simulation timesteps
@@ -239,10 +226,10 @@ void synapse_dynamics_print_plastic_synapses(
synapses_print_weight(
weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]);
log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n",
- synapse_row_sparse_delay(control_word, synapse_type_index_bits),
+ synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask),
synapse_types_get_type_char(synapse_type),
synapse_row_sparse_index(control_word, synapse_index_mask),
- SYNAPSE_DELAY_MASK, synapse_type_index_bits);
+ synapse_delay_mask, synapse_type_index_bits);
}
#endif // LOG_LEVEL >= LOG_DEBUG
}
@@ -287,31 +274,6 @@ bool synapse_dynamics_initialise(
return false;
}
- uint32_t n_neurons_power_2 = n_neurons;
- uint32_t log_n_neurons = 1;
- if (n_neurons != 1) {
- if (!is_power_of_2(n_neurons)) {
- n_neurons_power_2 = next_power_of_2(n_neurons);
- }
- log_n_neurons = ilog_2(n_neurons_power_2);
- }
-
- uint32_t n_synapse_types_power_2 = n_synapse_types;
- uint32_t log_n_synapse_types = 1;
- if (n_synapse_types != 1) {
- if (!is_power_of_2(n_synapse_types)) {
- n_synapse_types_power_2 = next_power_of_2(n_synapse_types);
- }
- log_n_synapse_types = ilog_2(n_synapse_types_power_2);
- }
-
- synapse_type_index_bits = log_n_neurons + log_n_synapse_types;
- synapse_type_index_mask = (1 << synapse_type_index_bits) - 1;
- synapse_index_bits = log_n_neurons;
- synapse_index_mask = (1 << synapse_index_bits) - 1;
- synapse_delay_index_type_bits =
- SYNAPSE_DELAY_BITS + synapse_type_index_bits;
- synapse_type_mask = (1 << log_n_synapse_types) - 1;
return true;
}
@@ -347,7 +309,7 @@ bool synapse_dynamics_process_plastic_synapses(
// 16-bits of 32-bit fixed synapse so same functions can be used
uint32_t delay_axonal = sparse_axonal_delay(control_word);
uint32_t delay_dendritic = synapse_row_sparse_delay(
- control_word, synapse_type_index_bits);
+ control_word, synapse_type_index_bits, synapse_delay_mask);
uint32_t type = synapse_row_sparse_type(
control_word, synapse_index_bits, synapse_type_mask);
uint32_t index =
@@ -360,9 +322,9 @@ bool synapse_dynamics_process_plastic_synapses(
synapse_structure_get_update_state(*plastic_words, type);
// Convert into ring buffer offset
- uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined(
+ uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined(
delay_axonal + delay_dendritic + time, type_index,
- synapse_type_index_bits);
+ synapse_type_index_bits, synapse_delay_mask);
// Update the synapse state
uint32_t post_delay = delay_dendritic;
@@ -410,11 +372,6 @@ void synapse_dynamics_process_post_synaptic_event(
timing_add_post_spike(time, last_post_time, last_post_trace));
}
-input_t synapse_dynamics_get_intrinsic_bias(
- UNUSED uint32_t time, UNUSED index_t neuron_index) {
- return ZERO;
-}
-
uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) {
return num_plastic_pre_synaptic_events;
}
@@ -441,7 +398,8 @@ bool synapse_dynamics_find_neuron(
uint32_t control_word = *control_words++;
if (synapse_row_sparse_index(control_word, synapse_index_mask) == id) {
*offset = synapse_row_num_plastic_controls(fixed_region) - plastic_synapse;
- *delay = synapse_row_sparse_delay(control_word, synapse_type_index_bits);
+ *delay = synapse_row_sparse_delay(control_word, synapse_type_index_bits,
+ synapse_delay_mask);
*synapse_type = synapse_row_sparse_type(
control_word, synapse_index_bits, synapse_type_mask);
return true;
@@ -479,7 +437,7 @@ bool synapse_dynamics_remove_neuron(uint32_t offset, synaptic_row_t row){
static inline control_t control_conversion(
uint32_t id, uint32_t delay, uint32_t type) {
control_t new_control =
- (delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) << synapse_type_index_bits;
+ (delay & ((1 << synapse_delay_bits) - 1)) << synapse_type_index_bits;
new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits;
new_control |= id & ((1 << synapse_index_bits) - 1);
return new_control;
diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h
index b6a44b03d6..fbfa135bf9 100644
--- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h
+++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h
@@ -53,13 +53,6 @@ bool synapse_dynamics_process_plastic_synapses(
void synapse_dynamics_process_post_synaptic_event(
uint32_t time, index_t neuron_index);
-//! \brief Get the intrinsic bias of the synapses
-//! \param[in] time: The current simulation time
-//! \param[in] neuron_index: Which neuron are we processing
-//! \return The intrinsic bias
-input_t synapse_dynamics_get_intrinsic_bias(
- uint32_t time, index_t neuron_index);
-
//! \brief Print the synapse dynamics
//! \param[in] plastic_region_data: Where the plastic data is
//! \param[in] fixed_region: Where the fixed data is
diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_remote.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_remote.c
new file mode 100644
index 0000000000..ae4906e7b8
--- /dev/null
+++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_remote.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017-2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#include "synapse_dynamics.h"
+
+void synapse_dynamics_process_post_synaptic_event(
+ UNUSED uint32_t time, UNUSED uint32_t neuron_index) {
+ // Does nothing, because the spike will be sent back using multicast
+}
diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c
index 75eb6a6548..2116810e28 100644
--- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c
+++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c
@@ -23,43 +23,14 @@
* \author Petrut Bogdan
*/
#include "synapse_dynamics.h"
+#include
#include
#include
-//! ::synapse_index_bits + ::synapse_type_bits
-static uint32_t synapse_type_index_bits;
-//! Number of bits to hold the neuron index
-static uint32_t synapse_index_bits;
-//! Mask to extract the neuron index (has ::synapse_index_bits bits set)
-static uint32_t synapse_index_mask;
-//! Number of bits to hold the synapse type
-static uint32_t synapse_type_bits;
-//! Mask to extract the synapse type (has ::synapse_type_bits bits set)
-static uint32_t synapse_type_mask;
-
bool synapse_dynamics_initialise(
- UNUSED address_t address, uint32_t n_neurons, uint32_t n_synapse_types,
+ UNUSED address_t address, UNUSED uint32_t n_neurons,
+ UNUSED uint32_t n_synapse_types,
UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) {
- uint32_t n_neurons_power_2 = n_neurons;
- uint32_t log_n_neurons = 1;
- if (n_neurons != 1) {
- if (!is_power_of_2(n_neurons)) {
- n_neurons_power_2 = next_power_of_2(n_neurons);
- }
- log_n_neurons = ilog_2(n_neurons_power_2);
- }
- uint32_t n_synapse_types_power_2 = n_synapse_types;
- synapse_type_bits = 1;
- if (n_synapse_types != 1) {
- if (!is_power_of_2(n_synapse_types)) {
- n_synapse_types_power_2 = next_power_of_2(n_synapse_types);
- }
- synapse_type_bits = ilog_2(n_synapse_types_power_2);
- }
- synapse_type_index_bits = log_n_neurons + synapse_type_bits;
- synapse_index_bits = log_n_neurons;
- synapse_index_mask = (1 << synapse_index_bits) - 1;
- synapse_type_mask = (1 << synapse_type_bits) - 1;
return true;
}
@@ -77,12 +48,6 @@ bool synapse_dynamics_process_plastic_synapses(
return false;
}
-//---------------------------------------
-input_t synapse_dynamics_get_intrinsic_bias(
- UNUSED uint32_t time, UNUSED index_t neuron_index) {
- return ZERO;
-}
-
void synapse_dynamics_print_plastic_synapses(
UNUSED synapse_row_plastic_data_t *plastic_region_data,
UNUSED synapse_row_fixed_part_t *fixed_region,
@@ -115,7 +80,7 @@ bool synapse_dynamics_find_neuron(
fixed_synapse;
*weight = synapse_row_sparse_weight(synaptic_word);
*delay = synapse_row_sparse_delay(synaptic_word,
- synapse_type_index_bits);
+ synapse_type_index_bits, synapse_delay_mask);
*synapse_type = synapse_row_sparse_type(
synaptic_word, synapse_index_bits, synapse_type_mask);
return true;
@@ -142,7 +107,7 @@ bool synapse_dynamics_remove_neuron(uint32_t offset, synaptic_row_t row) {
static inline uint32_t _fixed_synapse_convert(
uint32_t id, weight_t weight, uint32_t delay, uint32_t type) {
uint32_t new_synapse = weight << (32 - SYNAPSE_WEIGHT_BITS);
- new_synapse |= ((delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) <<
+ new_synapse |= ((delay & ((1 << synapse_delay_bits) - 1)) <<
synapse_type_index_bits);
new_synapse |= ((type & ((1 << synapse_type_bits) - 1)) <<
synapse_index_bits);
diff --git a/neural_modelling/src/neuron/population_table/population_table.h b/neural_modelling/src/neuron/population_table/population_table.h
index 963d03d680..950a1d568f 100644
--- a/neural_modelling/src/neuron/population_table/population_table.h
+++ b/neural_modelling/src/neuron/population_table/population_table.h
@@ -39,6 +39,9 @@ extern uint32_t failed_bit_field_reads;
//! they don't hit anything
extern uint32_t bit_field_filtered_packets;
+//! \brief The number of addresses from the same spike left to process
+extern uint16_t items_to_go;
+
//! \brief Set up the table
//! \param[in] table_address: The address of the start of the table data
//! \param[in] synapse_rows_address: The address of the start of the synapse
@@ -66,6 +69,11 @@ bool population_table_get_first_address(
spike_t spike, synaptic_row_t* row_address,
size_t* n_bytes_to_transfer);
+//! \brief Determine if there are more items with the same key
+static inline bool population_table_is_next(void) {
+ return items_to_go > 0;
+}
+
//! \brief Get the next row data for a previously given spike. If no spike has
//! been given, return False.
//! \param[out] spike: The initiating spike
diff --git a/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c b/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c
index 0b943148a1..33f36621e8 100644
--- a/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c
+++ b/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c
@@ -122,7 +122,8 @@ static uint32_t last_neuron_id = 0;
static uint16_t next_item = 0;
//! The number of relevant items remaining in the ::address_list
-static uint16_t items_to_go = 0;
+//! NOTE: Exported for speed of check
+uint16_t items_to_go = 0;
//! The bitfield map
static bit_field_t *connectivity_bit_field = NULL;
@@ -487,10 +488,13 @@ bool population_table_get_first_address(
log_debug("position = %d", position);
master_population_table_entry entry = master_population_table[position];
+
+ #if LOG_LEVEL >= LOG_DEBUG
if (entry.count == 0) {
log_debug("Spike %u (= %x): Population found in master population"
"table but count is 0", spike, spike);
}
+ #endif
last_spike = spike;
next_item = entry.start;
diff --git a/neural_modelling/src/neuron/regions.h b/neural_modelling/src/neuron/regions.h
index 1c02486be9..f685cac213 100644
--- a/neural_modelling/src/neuron/regions.h
+++ b/neural_modelling/src/neuron/regions.h
@@ -38,5 +38,6 @@ typedef enum neuron_regions_e {
DIRECT_MATRIX_REGION, //!< direct synaptic matrix; 11
BIT_FIELD_FILTER_REGION, //!< bitfield filter; 12
BIT_FIELD_BUILDER, //!< bitfield builder parameters; 13
- BIT_FIELD_KEY_MAP //!< bitfield key map; 14
+ BIT_FIELD_KEY_MAP, //!< bitfield key map; 14
+ RECORDING_REGION //!< general recording data; 15
} regions_e;
diff --git a/neural_modelling/src/neuron/send_spike.h b/neural_modelling/src/neuron/send_spike.h
new file mode 100644
index 0000000000..67bb96cac5
--- /dev/null
+++ b/neural_modelling/src/neuron/send_spike.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2021 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#ifndef __SEND_SPIKE_H__
+#define __SEND_SPIKE_H__
+
+#include
+#include
+#include
+#include
+#include "plasticity/synapse_dynamics.h"
+
+//! Key from neruon.c
+extern uint32_t key;
+
+//! Whether to use key from neuron.c
+extern bool use_key;
+
+//! Earliest time from neuron.c
+extern uint32_t earliest_send_time;
+
+//! Latest time from neuron.c
+extern uint32_t latest_send_time;
+
+//! Mask to recognise the Comms Controller "not full" flag
+#define TX_NOT_FULL_MASK 0x10000000
+
+//! \brief Perform direct spike sending with hardware for speed
+//! \param[in] key The key to send
+static inline void send_spike_mc(uint32_t key) {
+ // Wait for there to be space to send
+ uint32_t n_loops = 0;
+ while (!(cc[CC_TCR] & TX_NOT_FULL_MASK) && (n_loops < 10000)) {
+ spin1_delay_us(1);
+ n_loops++;
+ }
+ if (!(cc[CC_TCR] & TX_NOT_FULL_MASK)) {
+ io_printf(IO_BUF, "[ERROR] Couldn't send spike; TCR=0x%08x\n", cc[CC_TCR]);
+ rt_error(RTE_SWERR);
+ }
+
+ // Do the send
+ cc[CC_TCR] = PKT_MC;
+ cc[CC_TXKEY] = key;
+}
+
+//! \brief Performs the sending of a spike. Inlined for speed.
+//! \param[in] timer_count The global timer count when the time step started
+//! \param[in] time The current time step
+//! \param[in] The neuron index to send
+static inline void send_spike(UNUSED uint32_t timer_count, uint32_t time,
+ uint32_t neuron_index) {
+ // Do any required synapse processing
+ synapse_dynamics_process_post_synaptic_event(time, neuron_index);
+
+ if (use_key) {
+ send_spike_mc(key | neuron_index);
+
+ // Keep track of provenance data
+ uint32_t clocks = tc[T1_COUNT];
+ if (clocks > earliest_send_time) {
+ earliest_send_time = clocks;
+ }
+ if (clocks < latest_send_time) {
+ latest_send_time = clocks;
+ }
+ }
+}
+
+#endif // __SEND_SPIKE_H__
diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c
index 80b6654e37..3fb6ea9b16 100644
--- a/neural_modelling/src/neuron/spike_processing.c
+++ b/neural_modelling/src/neuron/spike_processing.c
@@ -215,7 +215,8 @@ static inline bool is_something_to_do(
//! \param[in,out] n_rewires: Accumulator of number of rewirings
//! \param[in,out] n_synapse_processes:
//! Accumulator of number of synapses processed
-static void setup_synaptic_dma_read(dma_buffer *current_buffer,
+//! \return Whether an actual DMA was set up or not
+static bool setup_synaptic_dma_read(dma_buffer *current_buffer,
uint32_t *n_rewires, uint32_t *n_synapse_processes) {
// Set up to store the DMA location and size to read
synaptic_row_t row;
@@ -256,6 +257,7 @@ static void setup_synaptic_dma_read(dma_buffer *current_buffer,
// processing and not the surplus DMA requests.
spike_processing_count++;
}
+ return setup_done;
}
//! \brief Set up a DMA write of synaptic data.
@@ -289,42 +291,50 @@ static inline void setup_synaptic_dma_write(
}
}
-//! \brief Called when a multicast packet is received
-//! \param[in] key: The key of the packet. The spike.
-//! \param payload: the payload of the packet. The count.
-static void multicast_packet_received_callback(uint key, uint payload) {
- p_per_ts_struct.packets_this_time_step += 1;
-
- // handle the 2 cases separately
- if (payload == 0) {
- log_debug(
- "Received spike %x at %d, DMA Busy = %d", key, time, dma_busy);
- // set to 1 to work with the loop.
- payload = 1;
- } else {
- log_debug(
- "Received spike %x with payload %d at %d, DMA Busy = %d",
- key, payload, time, dma_busy);
- }
-
- // cycle through the packet insertion
- for (uint count = payload; count > 0; count--) {
- in_spikes_add_spike(key);
- }
-
+//! \brief Start the DMA processing loop if not already running
+static inline void start_dma_loop(void) {
// If we're not already processing synaptic DMAs,
// flag pipeline as busy and trigger a feed event
// NOTE: locking is not used here because this is assumed to be FIQ
if (!dma_busy) {
log_debug("Sending user event for new spike");
+ // Only set busy if successful.
+ // NOTE: Counts when unsuccessful are handled by the API
if (spin1_trigger_user_event(0, 0)) {
dma_busy = true;
- } else {
- log_warning("Could not trigger user event\n");
}
}
}
+//! \brief Called when a multicast packet is received
+//! \param[in] key: The key of the packet. The spike.
+//! \param[in] unused: Only specified to match API
+static void multicast_packet_received_callback(uint key, UNUSED uint unused) {
+ p_per_ts_struct.packets_this_time_step += 1;
+ log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy);
+ if (in_spikes_add_spike(key)) {
+ start_dma_loop();
+ }
+}
+
+//! \brief Called when a multicast packet is received
+//! \param[in] key: The key of the packet. The spike.
+//! \param[in] payload: the payload of the packet. The count.
+static void multicast_packet_pl_received_callback(uint key, uint payload) {
+ p_per_ts_struct.packets_this_time_step += 1;
+ log_debug("Received spike %x with payload %d at %d, DMA Busy = %d",
+ key, payload, time, dma_busy);
+
+ // cycle through the packet insertion
+ bool added = false;
+ for (uint count = payload; count > 0; count--) {
+ added = in_spikes_add_spike(key);
+ }
+ if (added) {
+ start_dma_loop();
+ }
+}
+
//! \brief Called when a DMA completes
//! \param unused: unused
//! \param[in] tag: What sort of DMA has finished?
@@ -354,7 +364,7 @@ static void dma_complete_callback(UNUSED uint unused, uint tag) {
bool plastic_only = true;
// If rewiring, do rewiring first
- for (uint32_t i = 0; i < n_rewires; i++) {
+ for (uint32_t i = n_rewires; i > 0; i--) {
if (synaptogenesis_row_restructure(time, current_buffer->row)) {
write_back = true;
plastic_only = false;
@@ -381,7 +391,7 @@ static void dma_complete_callback(UNUSED uint unused, uint tag) {
address_t row = (address_t) current_buffer->row;
for (uint32_t i = 0;
i < (current_buffer->n_bytes_transferred >> 2); i++) {
- log_error("%u: 0x%.8x", i, row[i]);
+ log_error("%u: 0x%08x", i, row[i]);
}
rt_error(RTE_SWERR);
}
@@ -431,7 +441,6 @@ void spike_processing_clear_input_buffer(timer_t time) {
// Record the count whether clearing or not for provenance
count_input_buffer_packets_late += n_spikes;
-
}
bool spike_processing_initialise( // EXPORTED
@@ -465,7 +474,7 @@ bool spike_processing_initialise( // EXPORTED
spin1_callback_on(MC_PACKET_RECEIVED,
multicast_packet_received_callback, mc_packet_callback_priority);
spin1_callback_on(MCPL_PACKET_RECEIVED,
- multicast_packet_received_callback, mc_packet_callback_priority);
+ multicast_packet_pl_received_callback, mc_packet_callback_priority);
simulation_dma_transfer_done_callback_on(
DMA_TAG_READ_SYNAPTIC_ROW, dma_complete_callback);
spin1_callback_on(USER_EVENT, user_event_callback, user_event_priority);
@@ -473,29 +482,13 @@ bool spike_processing_initialise( // EXPORTED
return true;
}
-uint32_t spike_processing_get_buffer_overflows(void) { // EXPORTED
- // Check for buffer overflow
- return in_spikes_get_n_buffer_overflows();
-}
-
-uint32_t spike_processing_get_dma_complete_count(void) {
- return dma_complete_count;
-}
-
-uint32_t spike_processing_get_spike_processing_count(void) {
- return spike_processing_count;
-}
-
-uint32_t spike_processing_get_successful_rewires(void) { // EXPORTED
- return n_successful_rewires;
-}
-
-uint32_t spike_processing_get_n_packets_dropped_from_lateness(void) { // EXPORTED
- return count_input_buffer_packets_late;
-}
-
-uint32_t spike_processing_get_max_filled_input_buffer_size(void) { // EXPORTED
- return biggest_fill_size_of_input_buffer;
+void spike_processing_store_provenance(struct spike_processing_provenance *prov) {
+ prov->n_input_buffer_overflows = in_spikes_get_n_buffer_overflows();
+ prov->n_dmas_complete = dma_complete_count;
+ prov->n_spikes_processed = spike_processing_count;
+ prov->n_rewires = n_successful_rewires;
+ prov->n_packets_dropped_from_lateness = count_input_buffer_packets_late;
+ prov->max_filled_input_buffer_size = biggest_fill_size_of_input_buffer;
}
//! \brief set the number of times spike_processing has to attempt rewiring
@@ -504,17 +497,7 @@ bool spike_processing_do_rewiring(int number_of_rewires) {
// disable interrupts
uint cpsr = spin1_int_disable();
rewires_to_do += number_of_rewires;
-
- // If we're not already processing synaptic DMAs,
- // flag pipeline as busy and trigger a feed event
- if (!dma_busy) {
- log_debug("Sending user event for rewiring");
- if (spin1_trigger_user_event(0, 0)) {
- dma_busy = true;
- } else {
- log_debug("Could not trigger user event\n");
- }
- }
+ start_dma_loop();
// enable interrupts
spin1_mode_restore(cpsr);
return true;
diff --git a/neural_modelling/src/neuron/spike_processing.h b/neural_modelling/src/neuron/spike_processing.h
index 0958e78b87..ff5a12c14c 100644
--- a/neural_modelling/src/neuron/spike_processing.h
+++ b/neural_modelling/src/neuron/spike_processing.h
@@ -24,12 +24,31 @@
#include
#include
+// Provenance for spike processing
+struct spike_processing_provenance {
+ //! A count of the times that the synaptic input circular buffers overflowed
+ uint32_t n_input_buffer_overflows;
+ //! The number of DMAs performed
+ uint32_t n_dmas_complete;
+ //! The number of spikes received and processed
+ uint32_t n_spikes_processed;
+ //! The number of rewirings performed.
+ uint32_t n_rewires;
+ //! The number of packets that were cleared at the end of timesteps
+ uint32_t n_packets_dropped_from_lateness;
+ //! The maximum size of the input buffer
+ uint32_t max_filled_input_buffer_size;
+};
+
//! \brief Initialise the spike processing system
//! \param[in] row_max_n_bytes: The maximum size of a synaptic row
//! \param[in] mc_packet_callback_priority:
//! Multicast packet receive interrupt priority
//! \param[in] user_event_priority: User event interrupt priority
//! \param[in] incoming_spike_buffer_size: Size of buffer for receiving spikes
+//! \param[in] clear_input_buffers_of_late_packets: Whether packets that are left
+//! at the end of a time step are
+//! wiped
//! \param[in] packets_per_timestep_region:
//! The recording region to use for the packets per timestep
//! \return True if initialisation succeeded
@@ -39,39 +58,17 @@ bool spike_processing_initialise(
bool clear_input_buffers_of_late_packets_init,
uint32_t packets_per_timestep_region);
-//! \brief Gets the number of times the input buffer has overflowed
-//! \return the number of times the input buffer has overflowed
-uint32_t spike_processing_get_buffer_overflows(void);
-
-//! \brief Gets the number of DMA's that were completed
-//! \return the number of DMA's that were completed.
-uint32_t spike_processing_get_dma_complete_count(void);
-
-//! \brief Gets the number of spikes that were processed
-//! \return the number of spikes that were processed
-uint32_t spike_processing_get_spike_processing_count(void);
-
-//! \brief Gets the number of successful rewires performed
-//! \return the number of successful rewires
-uint32_t spike_processing_get_successful_rewires(void);
+//! \brief Get provenance data for Spike processing
+//! \param[in] prov The structure to store the provenance data in
+void spike_processing_store_provenance(struct spike_processing_provenance *prov);
//! \brief Set the number of times spike_processing has to attempt rewiring.
//! \param[in] number_of_rewires: The number of rewirings to perform
//! \return currently always true
bool spike_processing_do_rewiring(int number_of_rewires);
-//! \brief return the number of packets dropped by the input buffer as they
-//! arrived too late to be processed
-//! \return the number of packets dropped.
-uint32_t spike_processing_get_n_packets_dropped_from_lateness(void);
-
//! \brief clears the input buffer of packets
//! \param[in] time: The current timestep
void spike_processing_clear_input_buffer(timer_t time);
-//! \brief returns how many packets were at max inside the input buffer at
-//! any given point.
-//! \return the max size the input buffer reached
-uint32_t spike_processing_get_max_filled_input_buffer_size(void);
-
#endif // _SPIKE_PROCESSING_H_
diff --git a/neural_modelling/src/neuron/spike_processing_fast.c b/neural_modelling/src/neuron/spike_processing_fast.c
new file mode 100644
index 0000000000..b7fa876f4b
--- /dev/null
+++ b/neural_modelling/src/neuron/spike_processing_fast.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#include "spike_processing_fast.h"
+#include "population_table/population_table.h"
+#include "synapses.h"
+#include "plasticity/synapse_dynamics.h"
+#include "structural_plasticity/synaptogenesis_dynamics.h"
+#include "dma_common.h"
+#include
+#include
+#include
+#include
+#include
+
+//! DMA buffer structure combines the row read from SDRAM with information
+//! about the read.
+typedef struct dma_buffer {
+ //! Address in SDRAM to write back plastic region to
+ synaptic_row_t sdram_writeback_address;
+
+ //! \brief Key of originating spike
+ //! \details used to allow row data to be re-used for multiple spikes
+ spike_t originating_spike;
+
+ //! Number of bytes transferred in the read
+ uint32_t n_bytes_transferred;
+
+ //! Row data
+ synaptic_row_t row;
+} dma_buffer;
+
+//! The number of DMA Buffers to use
+#define N_DMA_BUFFERS 2
+
+//! Mask to apply to perform modulo on the DMA buffer index
+#define DMA_BUFFER_MOD_MASK 0x1
+
+//! The DTCM buffers for the synapse rows
+static dma_buffer dma_buffers[N_DMA_BUFFERS];
+
+//! The index of the next buffer to be filled by a DMA
+static uint32_t next_buffer_to_fill;
+
+//! The index of the buffer currently being filled by a DMA read
+static uint32_t next_buffer_to_process;
+
+//! \brief How many packets were lost from the input buffer because of
+//! late arrival
+static uint32_t count_input_buffer_packets_late;
+
+//! tracker of how full the input buffer got.
+static uint32_t biggest_fill_size_of_input_buffer;
+
+//! \brief Whether if we should clear packets from the input buffer at the
+//! end of a timer tick.
+static bool clear_input_buffers_of_late_packets;
+
+//! The number of CPU cycles taken to transfer spikes (measured later)
+static uint32_t clocks_to_transfer = 0;
+
+//! The number of successful rewiring attempts
+static uint32_t n_successful_rewires = 0;
+
+//! The number of DMAs successfully completed
+static uint32_t dma_complete_count = 0;
+
+//! The number of spikes successfully processed
+static uint32_t spike_processing_count = 0;
+
+//! The maximum number of spikes received in a time step
+static uint32_t max_spikes_received = 0;
+
+//! The number of spikes processed this time step
+static uint32_t spikes_processed_this_time_step = 0;
+
+//! The maximum number of spikes processed in a time step
+static uint32_t max_spikes_processed = 0;
+
+//! The number of times the transfer ran to the next time step
+static uint32_t transfer_timer_overruns = 0;
+
+//! The maximum overrun of the timer tick
+static uint32_t max_transfer_timer_overrun = 0;
+
+//! The number of times the timer tick was skipped entirely
+static uint32_t skipped_time_steps = 0;
+
+//! The number of packets received this time step for recording
+static struct {
+ uint32_t time;
+ uint32_t packets_this_time_step;
+} p_per_ts_struct;
+
+//! the region to record the packets per time step in
+static uint32_t p_per_ts_region;
+
+//! Where synaptic input is to be written
+static struct sdram_config sdram_inputs;
+
+//! Key configuration to detect local neuron spikes
+static struct key_config key_config;
+
+//! The ring buffers to use
+static weight_t *ring_buffers;
+
+//! \brief Determine if this is the end of the time step
+//! \return True if end of time step
+static inline bool is_end_of_time_step(void) {
+ return tc[T2_COUNT] == 0;
+}
+
+//! \brief Clear end of time step so it can be detected again
+static inline void clear_end_of_time_step(void) {
+ tc[T2_INT_CLR] = 1;
+}
+
+//! \brief Wait for a DMA to complete or the end of a time step, whichever
+//! happens first.
+//! \return True if the DMA is completed first, False if the time step ended first
+static inline bool wait_for_dma_to_complete_or_end(void) {
+#if LOG_LEVEL >= LOG_DEBUG
+ // Useful for checking when things are going wrong, but shouldn't be
+ // needed in normal code
+ uint32_t n_loops = 0;
+ while (!is_end_of_time_step() && !dma_done() && n_loops < 10000) {
+ n_loops++;
+ }
+ if (!is_end_of_time_step() && !dma_done()) {
+ log_error("Timeout on DMA loop: DMA stat = 0x%08x!", dma[DMA_STAT]);
+ rt_error(RTE_SWERR);
+ }
+#else
+ // This is the normal loop, done without checking
+ while (!dma_done()) {
+ continue;
+ }
+#endif
+ dma[DMA_CTRL] = 0x8;
+
+ return !is_end_of_time_step();
+}
+
+//! \brief Transfer the front of the ring buffers to SDRAM to be read by the
+//! neuron core at the next time step.
+//! \param[in] time The current time step being executed.
+static inline void transfer_buffers(uint32_t time) {
+ uint32_t first_ring_buffer = synapse_row_get_first_ring_buffer_index(
+ time + 1, synapse_type_index_bits, synapse_delay_mask);
+ log_debug("Writing %d bytes to 0x%08x from ring buffer %d at 0x%08x",
+ sdram_inputs.size_in_bytes, sdram_inputs.address, first_ring_buffer,
+ &ring_buffers[first_ring_buffer]);
+ do_fast_dma_write(&ring_buffers[first_ring_buffer], sdram_inputs.address,
+ sdram_inputs.size_in_bytes);
+}
+
+//! \brief Do processing related to the end of the time step
+//! \param[in] time The time step that is ending.
+static inline void process_end_of_time_step(uint32_t time) {
+ // Stop interrupt processing
+ uint32_t cspr = spin1_int_disable();
+
+ cancel_dmas();
+
+ // Start transferring buffer data for next time step
+ transfer_buffers(time);
+ wait_for_dma_to_complete();
+
+ // uint32_t end = tc[T1_COUNT];
+ if (tc[T1_MASK_INT]) {
+ transfer_timer_overruns++;
+ uint32_t diff = tc[T1_LOAD] - tc[T1_COUNT];
+ if (diff > max_transfer_timer_overrun) {
+ max_transfer_timer_overrun = diff;
+ }
+ }
+
+ spin1_mode_restore(cspr);
+}
+
+//! \brief Read a synaptic row from SDRAM into a local buffer.
+static inline void read_synaptic_row(spike_t spike, synaptic_row_t row,
+ uint32_t n_bytes) {
+ dma_buffer *buffer = &dma_buffers[next_buffer_to_fill];
+ buffer->sdram_writeback_address = row;
+ buffer->originating_spike = spike;
+ buffer->n_bytes_transferred = n_bytes;
+ do_fast_dma_read(row, buffer->row, n_bytes);
+ next_buffer_to_fill = (next_buffer_to_fill + 1) & DMA_BUFFER_MOD_MASK;
+}
+
+//! \brief Get the next spike, keeping track of provenance data
+//! \param[in] time Simulation time step
+//! \param[out] spike Pointer to receive the next spike
+//! \return True if a spike was retrieved
+static inline bool get_next_spike(uint32_t time, spike_t *spike) {
+ uint32_t n_spikes = in_spikes_size();
+ if (biggest_fill_size_of_input_buffer < n_spikes) {
+ biggest_fill_size_of_input_buffer = n_spikes;
+ }
+ if (!in_spikes_get_next_spike(spike)) {
+ return false;
+ }
+ // Detect a looped back spike
+ if ((*spike & key_config.mask) == key_config.key) {
+ synapse_dynamics_process_post_synaptic_event(
+ time, *spike & key_config.spike_id_mask);
+ return key_config.self_connected;
+ }
+ return true;
+}
+
+//! \brief Start the first DMA after awaking from spike reception. Loops over
+//! available spikes until one causes a DMA.
+//! \param[in] time Simulation time step
+//! \param[in/out] spike Starts as the first spike received, but might change
+//! if the first spike doesn't cause a DMA
+//! \return True if a DMA was started
+static inline bool start_first_dma(uint32_t time, spike_t *spike) {
+ synaptic_row_t row;
+ uint32_t n_bytes;
+
+ do {
+ if (population_table_get_first_address(*spike, &row, &n_bytes)) {
+ read_synaptic_row(*spike, row, n_bytes);
+ return true;
+ }
+ } while (!is_end_of_time_step() && get_next_spike(time, spike));
+
+ return false;
+}
+
+//! \brief Get the details for the next DMA, but don't start it.
+//! \param[in] time Simulation time step
+//! \param[out] spike Pointer to receive the spike the DMA relates to
+//! \param[out] row Pointer to receive the address to be transferred
+//! \param[out] n_bytes Pointer to receive the number of bytes to transfer
+//! \return True if there is a DMA to do
+static inline bool get_next_dma(uint32_t time, spike_t *spike, synaptic_row_t *row,
+ uint32_t *n_bytes) {
+ if (population_table_is_next() && population_table_get_next_address(
+ spike, row, n_bytes)) {
+ return true;
+ }
+
+ while (!is_end_of_time_step() && get_next_spike(time, spike)) {
+ if (population_table_get_first_address(*spike, row, n_bytes)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//! \brief Handle a synapse processing error.
+//! \param[in] buffer The DMA buffer that was being processed
+static inline void handle_row_error(dma_buffer *buffer) {
+ log_error(
+ "Error processing spike 0x%.8x for address 0x%.8x (local=0x%.8x)",
+ buffer->originating_spike, buffer->sdram_writeback_address, buffer->row);
+
+ // Print out the row for debugging
+ address_t row = (address_t) buffer->row;
+ for (uint32_t i = 0; i < (buffer->n_bytes_transferred >> 2); i++) {
+ log_error(" %u: 0x%08x", i, row[i]);
+ }
+
+ // Print out parsed data for static synapses
+ synapse_row_fixed_part_t *fixed_region = synapse_row_fixed_region(buffer->row);
+ uint32_t *synaptic_words = synapse_row_fixed_weight_controls(fixed_region);
+ uint32_t fixed_synapse = synapse_row_num_fixed_synapses(fixed_region);
+ log_error("\nFixed-Fixed Region (%u synapses):", fixed_synapse);
+ for (; fixed_synapse > 0; fixed_synapse--) {
+ uint32_t synaptic_word = *synaptic_words++;
+
+ uint32_t delay = synapse_row_sparse_delay(
+ synaptic_word, synapse_type_index_bits, synapse_delay_mask);
+ uint32_t type = synapse_row_sparse_type(
+ synaptic_word, synapse_index_bits, synapse_type_mask);
+ uint32_t neuron = synapse_row_sparse_index(
+ synaptic_word, synapse_index_mask);
+ log_error(" Delay %u, Synapse Type %u, Neuron %u", delay, type, neuron);
+ }
+ rt_error(RTE_SWERR);
+}
+
+//! \brief Process a row that has been transferred
+//! \param[in] time The current time step of the simulation
+//! \param[in] dma_in_progress Whether there was a DMA started and not checked
+static inline void process_current_row(uint32_t time, bool dma_in_progress) {
+ bool write_back = false;
+ dma_buffer *buffer = &dma_buffers[next_buffer_to_process];
+
+ if (!synapses_process_synaptic_row(time, buffer->row, &write_back)) {
+ handle_row_error(buffer);
+ }
+ synaptogenesis_spike_received(time, buffer->originating_spike);
+ spike_processing_count++;
+ if (write_back) {
+ uint32_t n_bytes = synapse_row_plastic_size(buffer->row) * sizeof(uint32_t);
+ void *system_address = synapse_row_plastic_region(
+ buffer->sdram_writeback_address);
+ void *tcm_address = synapse_row_plastic_region(buffer->row);
+ // Make sure an outstanding DMA is completed before starting this one
+ if (dma_in_progress) {
+ wait_for_dma_to_complete();
+ }
+ do_fast_dma_write(tcm_address, system_address, n_bytes);
+ // Only wait for this DMA to complete if there isn't another running,
+ // as otherwise the next wait will fail!
+ if (!dma_in_progress) {
+ wait_for_dma_to_complete();
+ }
+ }
+ next_buffer_to_process = (next_buffer_to_process + 1) & DMA_BUFFER_MOD_MASK;
+ spikes_processed_this_time_step++;
+}
+
+//! \brief Store data for provenance and recordings
+//! \param[in] time The time step of the simulation
+static inline void store_data(uint32_t time) {
+ // Record the number of packets still left
+ count_input_buffer_packets_late += in_spikes_size();
+
+ // Record the number of packets received last time step
+ p_per_ts_struct.time = time;
+ recording_record(p_per_ts_region, &p_per_ts_struct, sizeof(p_per_ts_struct));
+
+ if (p_per_ts_struct.packets_this_time_step > max_spikes_received) {
+ max_spikes_received = p_per_ts_struct.packets_this_time_step;
+ }
+ if (spikes_processed_this_time_step > max_spikes_processed) {
+ max_spikes_processed = spikes_processed_this_time_step;
+ }
+}
+
+//! \brief Measure how long it takes to transfer buffers
+static inline void measure_transfer_time(void) {
+ // Measure the time to do an upload to know when to schedule the timer
+ tc[T2_LOAD] = 0xFFFFFFFF;
+ tc[T2_CONTROL] = 0x82;
+ transfer_buffers(0);
+ wait_for_dma_to_complete();
+ clocks_to_transfer = (0xFFFFFFFF - tc[T2_COUNT])
+ + sdram_inputs.time_for_transfer_overhead;
+ tc[T2_CONTROL] = 0;
+ log_info("Transfer of %u bytes to 0x%08x took %u cycles",
+ sdram_inputs.size_in_bytes, sdram_inputs.address, clocks_to_transfer);
+}
+
+//! \brief Prepare the start of a time step
+//! \param[in] time The time step being executed
+//! \return Whether we should proceed or not
+static inline bool prepare_timestep(uint32_t time) {
+ uint32_t cspr = spin1_int_disable();
+
+ // Reset these to ensure consistency
+ next_buffer_to_fill = 0;
+ next_buffer_to_process = 0;
+
+ // We do this here rather than during init, as it should have similar
+ // contention to the expected time of execution
+ if (clocks_to_transfer == 0) {
+ measure_transfer_time();
+ }
+
+ // Start timer2 to tell us when to stop
+ uint32_t timer = tc[T1_COUNT];
+ if (timer < clocks_to_transfer) {
+ return false;
+ }
+ uint32_t time_until_stop = timer - clocks_to_transfer;
+ tc[T2_CONTROL] = 0;
+ tc[T2_LOAD] = time_until_stop;
+ tc[T2_CONTROL] = 0xe3;
+
+ log_debug("Start of time step %d, timer = %d, loading with %d",
+ time, timer, time_until_stop);
+
+ // Store recording data from last time step
+ store_data(time);
+
+ // Clear the buffer if needed
+ if (clear_input_buffers_of_late_packets) {
+ in_spikes_clear();
+ }
+ p_per_ts_struct.packets_this_time_step = 0;
+ spikes_processed_this_time_step = 0;
+
+ synapses_flush_ring_buffers(time);
+ spin1_mode_restore(cspr);
+ return true;
+}
+
+//! \brief Perform synaptic rewiring for this time step
+//! \param[in] time The current time step
+//! \param[in] n_rewires The number of rewirings to try
+static inline void do_rewiring(uint32_t time, uint32_t n_rewires) {
+ uint32_t spike;
+ synaptic_row_t row;
+ uint32_t n_bytes;
+
+ uint32_t current_buffer = 0;
+ uint32_t next_buffer = 0;
+ bool dma_in_progress = false;
+
+ // Start the first transfer
+ uint32_t rewires_to_go = n_rewires;
+ while (rewires_to_go > 0 && !dma_in_progress) {
+ if (synaptogenesis_dynamics_rewire(time, &spike, &row, &n_bytes)) {
+ dma_buffers[next_buffer].sdram_writeback_address = row;
+ dma_buffers[next_buffer].n_bytes_transferred = n_bytes;
+ do_fast_dma_read(row, dma_buffers[next_buffer].row, n_bytes);
+ next_buffer = (next_buffer + 1) & DMA_BUFFER_MOD_MASK;
+ dma_in_progress = true;
+ }
+ rewires_to_go--;
+ }
+
+ // Go in a loop until all done
+ while (dma_in_progress) {
+
+ // Start the next DMA if possible
+ dma_in_progress = false;
+ while (rewires_to_go > 0 && !dma_in_progress) {
+ if (synaptogenesis_dynamics_rewire(time, &spike, &row, &n_bytes)) {
+ dma_in_progress = true;
+ }
+ rewires_to_go--;
+ }
+
+ // Wait for the last DMA to complete
+ wait_for_dma_to_complete();
+
+ // Start the next DMA read
+ if (dma_in_progress) {
+ dma_buffers[next_buffer].sdram_writeback_address = row;
+ dma_buffers[next_buffer].n_bytes_transferred = n_bytes;
+ do_fast_dma_read(row, dma_buffers[next_buffer].row, n_bytes);
+ next_buffer = (next_buffer + 1) & DMA_BUFFER_MOD_MASK;
+ }
+
+ // If the row has been restructured, transfer back to SDRAM
+ if (synaptogenesis_row_restructure(
+ time, dma_buffers[current_buffer].row)) {
+ n_successful_rewires++;
+ if (dma_in_progress) {
+ wait_for_dma_to_complete();
+ }
+ do_fast_dma_write(
+ dma_buffers[current_buffer].row,
+ dma_buffers[current_buffer].sdram_writeback_address,
+ dma_buffers[current_buffer].n_bytes_transferred);
+ if (!dma_in_progress) {
+ wait_for_dma_to_complete();
+ }
+ }
+ current_buffer = (current_buffer + 1) & DMA_BUFFER_MOD_MASK;
+ }
+}
+
+void spike_processing_fast_time_step_loop(uint32_t time, uint32_t n_rewires) {
+
+ // Prepare for the start
+ if (!prepare_timestep(time)) {
+ skipped_time_steps++;
+ process_end_of_time_step(time);
+ return;
+ }
+
+ // Do rewiring
+ do_rewiring(time, n_rewires);
+
+ // Loop until the end of a time step is reached
+ while (true) {
+
+ // Wait for a spike, or the timer to expire
+ uint32_t spike;
+ while (!is_end_of_time_step() && !get_next_spike(time, &spike)) {
+ // This doesn't wait for interrupt currently because there isn't
+ // a way to have a T2 interrupt without a callback function, and
+ // a callback function is too slow! This is therefore a busy wait.
+ // wait_for_interrupt();
+ }
+
+ // If the timer has gone off, that takes precedence
+ if (is_end_of_time_step()) {
+ clear_end_of_time_step();
+ process_end_of_time_step(time);
+ return;
+ }
+
+ // There must be a spike! Start a DMA processing loop...
+ bool dma_in_progress = start_first_dma(time, &spike);
+ while (dma_in_progress && !is_end_of_time_step()) {
+
+ // See if there is another DMA to do
+ synaptic_row_t row;
+ uint32_t n_bytes;
+ dma_in_progress = get_next_dma(time, &spike, &row, &n_bytes);
+
+ // Finish the current DMA before starting the next
+ if (!wait_for_dma_to_complete_or_end()) {
+ count_input_buffer_packets_late += 1;
+ break;
+ }
+ dma_complete_count++;
+ if (dma_in_progress) {
+ read_synaptic_row(spike, row, n_bytes);
+ }
+
+ // Process the row we already have while the DMA progresses
+ process_current_row(time, dma_in_progress);
+ }
+ }
+}
+
+//! \brief Called when a multicast packet is received
+//! \param[in] key: The key of the packet. The spike.
+//! \param payload: the payload of the packet. The count.
+void multicast_packet_received_callback(uint key, UNUSED uint unused) {
+ log_debug("Received spike %x", key);
+ p_per_ts_struct.packets_this_time_step++;
+ in_spikes_add_spike(key);
+}
+
+//! \brief Called when a multicast packet is received
+//! \param[in] key: The key of the packet. The spike.
+//! \param payload: the payload of the packet. The count.
+void multicast_packet_pl_received_callback(uint key, uint payload) {
+ log_debug("Received spike %x with payload %d", key, payload);
+ p_per_ts_struct.packets_this_time_step++;
+
+ // cycle through the packet insertion
+ for (uint count = payload; count > 0; count--) {
+ in_spikes_add_spike(key);
+ }
+}
+
+bool spike_processing_fast_initialise(
+ uint32_t row_max_n_words, uint32_t spike_buffer_size,
+ bool discard_late_packets, uint32_t pkts_per_ts_rec_region,
+ uint32_t multicast_priority, struct sdram_config sdram_inputs_param,
+ struct key_config key_config_param, weight_t *ring_buffers_param) {
+ // Allocate the DMA buffers
+ for (uint32_t i = 0; i < N_DMA_BUFFERS; i++) {
+ dma_buffers[i].row = spin1_malloc(row_max_n_words * sizeof(uint32_t));
+ if (dma_buffers[i].row == NULL) {
+ log_error("Could not initialise DMA buffers");
+ return false;
+ }
+ log_debug("DMA buffer %u allocated at 0x%08x",
+ i, dma_buffers[i].row);
+ }
+ next_buffer_to_fill = 0;
+ next_buffer_to_process = 0;
+
+ // Allocate incoming spike buffer
+ if (!in_spikes_initialize_spike_buffer(spike_buffer_size)) {
+ return false;
+ }
+
+ // Store parameters and data
+ clear_input_buffers_of_late_packets = discard_late_packets;
+ p_per_ts_region = pkts_per_ts_rec_region;
+ sdram_inputs = sdram_inputs_param;
+ key_config = key_config_param;
+ ring_buffers = ring_buffers_param;
+
+ // Configure for multicast reception
+ spin1_callback_on(MC_PACKET_RECEIVED, multicast_packet_received_callback,
+ multicast_priority);
+ spin1_callback_on(MCPL_PACKET_RECEIVED, multicast_packet_pl_received_callback,
+ multicast_priority);
+
+ // Wipe the inputs using word writes
+ for (uint32_t i = 0; i < (sdram_inputs.size_in_bytes >> 2); i++) {
+ sdram_inputs.address[i] = 0;
+ }
+
+ return true;
+}
+
+void spike_processing_fast_store_provenance(
+ struct spike_processing_fast_provenance *prov) {
+ prov->n_input_buffer_overflows = in_spikes_get_n_buffer_overflows();
+ prov->n_dmas_complete = dma_complete_count;
+ prov->n_spikes_processed = spike_processing_count;
+ prov->n_rewires = n_successful_rewires;
+ prov->n_packets_dropped_from_lateness = count_input_buffer_packets_late;
+ prov->max_filled_input_buffer_size = biggest_fill_size_of_input_buffer;
+ prov->max_spikes_processed = max_spikes_processed;
+ prov->max_spikes_received = max_spikes_received;
+ prov->n_transfer_timer_overruns = transfer_timer_overruns;
+ prov->n_skipped_time_steps = skipped_time_steps;
+ prov->max_transfer_timer_overrun = max_transfer_timer_overrun;
+}
diff --git a/neural_modelling/src/neuron/spike_processing_fast.h b/neural_modelling/src/neuron/spike_processing_fast.h
new file mode 100644
index 0000000000..e1468bb106
--- /dev/null
+++ b/neural_modelling/src/neuron/spike_processing_fast.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+//! \file
+//! \brief Spike processing fast API
+#ifndef _SPIKE_PROCESSING_FAST_H_
+#define _SPIKE_PROCESSING_FAST_H_
+
+#include
+#include
+#include
+#include "synapse_row.h"
+
+//! A region of SDRAM used to transfer synapses
+struct sdram_config {
+ //! The address of the input data to be transferred
+ uint32_t *address;
+ //! The size of the input data to be transferred
+ uint32_t size_in_bytes;
+ //! The time of the transfer in us
+ uint32_t time_for_transfer_overhead;
+};
+
+//! The key and mask being used to send spikes from neurons processed on this
+//! core.
+struct key_config {
+ //! The key
+ uint32_t key;
+ //! The mask
+ uint32_t mask;
+ //! The mask to get the spike ID
+ uint32_t spike_id_mask;
+ //! Is the node self connected
+ uint32_t self_connected;
+};
+
+// Provenance for spike processing
+struct spike_processing_fast_provenance {
+ //! A count of the times that the synaptic input circular buffers overflowed
+ uint32_t n_input_buffer_overflows;
+ //! The number of DMAs performed
+ uint32_t n_dmas_complete;
+ //! The number of spikes received and processed
+ uint32_t n_spikes_processed;
+ //! The number of rewirings performed.
+ uint32_t n_rewires;
+ //! The number of packets that were cleared at the end of timesteps
+ uint32_t n_packets_dropped_from_lateness;
+ //! The maximum size of the input buffer
+ uint32_t max_filled_input_buffer_size;
+ //! The maximum number of spikes received in a time step
+ uint32_t max_spikes_received;
+ //! The maximum number of spikes processed in a time step
+ uint32_t max_spikes_processed;
+ //! The number of times the transfer took longer than expected
+ uint32_t n_transfer_timer_overruns;
+ //! The number of times a time step was skipped entirely
+ uint32_t n_skipped_time_steps;
+ //! The maximum additional time taken to transfer
+ uint32_t max_transfer_timer_overrun;
+
+};
+
+//! \brief Set up spike processing
+//! \param[in] row_max_n_words The maximum row length in words
+//! \param[in] spike_buffer_size The size to make the spike buffer
+//! \param[in] discard_late_packets Whether to throw away packets not processed
+//! at the end of a time step or keep them for
+//! the next time step
+//! \param[in] pkts_per_ts_rec_region The ID of the recording region to record
+//! packets-per-time-step to
+//! \param[in] multicast_priority The priority of multicast processing
+//! \param[in] sdram_inputs_param Details of the SDRAM transfer for the ring buffers
+//! \param[in] key_config_param Details of the key used by the neuron core
+//! \param[in] ring_buffers_param The ring buffers to update with synapse weights
+//! \return Whether the setup was successful or not
+bool spike_processing_fast_initialise(
+ uint32_t row_max_n_words, uint32_t spike_buffer_size,
+ bool discard_late_packets, uint32_t pkts_per_ts_rec_region,
+ uint32_t multicast_priority, struct sdram_config sdram_inputs_param,
+ struct key_config key_config_param, weight_t *ring_buffers_param);
+
+//! \brief The main loop of spike processing to be run once per time step.
+//! Note that this function will not return until the end of the time
+//! step; it will only be interrupted by SDP or MC packets.
+//! \param[in] time The time step of the simulation
+//! \param[in] n_rewires The number of rewiring attempts to be done
+void spike_processing_fast_time_step_loop(uint32_t time, uint32_t n_rewires);
+
+//! \brief Store any provenance data gathered from spike processing
+//! \param[in] prov The structure to store the provenance data in
+void spike_processing_fast_store_provenance(
+ struct spike_processing_fast_provenance *prov);
+
+#endif // _SPIKE_PROCESSING_FAST_H_
diff --git a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/sp_structs.h b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/sp_structs.h
index 4f9e831c31..275cc33c8f 100644
--- a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/sp_structs.h
+++ b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/sp_structs.h
@@ -289,9 +289,9 @@ static inline uint8_t *sp_structs_read_in_common(
for (uint32_t i=0; i < n_elements; i++){
log_debug("index %d, pop index %d, sub pop index %d, neuron_index %d",
- i, post_to_pre_table[i]->pop_index,
- post_to_pre_table[i]->sub_pop_index,
- post_to_pre_table[i]->neuron_index);
+ i, (*post_to_pre_table)[i].pop_index,
+ (*post_to_pre_table)[i].sub_pop_index,
+ (*post_to_pre_table)[i].neuron_index);
}
data += n_elements * sizeof(post_to_pre_entry);
return (uint8_t *) data;
diff --git a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
index fde49cc02f..a5a9b30bb5 100644
--- a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
+++ b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis/topographic_map_impl.c
@@ -90,6 +90,10 @@ typedef struct structural_recording_values_t {
structural_recording_values_t structural_recording_values;
+
+//! Timer callbacks since last rewiring
+static uint32_t last_rewiring_time = 0;
+
void print_post_to_pre_entry(void) {
uint32_t n_elements =
rewiring_data.s_max * rewiring_data.machine_no_atoms;
@@ -349,11 +353,11 @@ static inline bool row_restructure(
return false;
}
} else {
- // A synapse cannot be added if one exists between the current pair of neurons
- if (!synapse_dynamics_find_neuron(
- current_state->post_syn_id, row,
- &(current_state->weight), &(current_state->delay),
- &(current_state->offset), &(current_state->synapse_type))) {
+ // A synapse cannot be added if one exists between the current pair of neurons
+ if (!synapse_dynamics_find_neuron(
+ current_state->post_syn_id, row,
+ &(current_state->weight), &(current_state->delay),
+ &(current_state->offset), &(current_state->synapse_type))) {
if (synaptogenesis_formation_rule(current_state,
formation_params[current_state->post_to_pre.pop_index], time, row)) {
// Create recorded value
@@ -388,14 +392,20 @@ bool synaptogenesis_row_restructure(uint32_t time, synaptic_row_t row) {
return return_value;
}
-int32_t synaptogenesis_rewiring_period(void) {
- return rewiring_data.p_rew;
+void synaptogenesis_spike_received(uint32_t time, spike_t spike) {
+ partner_spike_received(time, spike);
}
-bool synaptogenesis_is_fast(void) {
- return rewiring_data.fast == 1;
-}
+uint32_t synaptogenesis_n_updates(void) {
+ if (rewiring_data.fast) {
+ return rewiring_data.p_rew;
+ }
-void synaptogenesis_spike_received(uint32_t time, spike_t spike) {
- partner_spike_received(time, spike);
+ last_rewiring_time++;
+ if (last_rewiring_time >= rewiring_data.p_rew) {
+ last_rewiring_time = 0;
+ return 1;
+ }
+
+ return 0;
}
diff --git a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics.h b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics.h
index 13aba04dcf..ae1f1ca476 100644
--- a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics.h
+++ b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics.h
@@ -51,24 +51,16 @@ bool synaptogenesis_dynamics_rewire(uint32_t time,
//! \return True if the row was changed and needs to be written back
bool synaptogenesis_row_restructure(uint32_t time, synaptic_row_t row);
-//! \brief Get the period of rewiring
-//! \return Based on synaptogenesis_is_fast(), this can either be how many times
-//! rewiring happens in a timestep, or how many timesteps have to pass until
-//! rewiring happens.
-int32_t synaptogenesis_rewiring_period(void);
-
-//! \brief Get whether rewiring is attempted multiple times per timestep
-//! or after a number of timesteps.
-//! \return true if the result of synaptogenesis_rewiring_period() is the number
-//! of attempts to try per timestep.
-bool synaptogenesis_is_fast(void);
-
//! \brief Indicates that a spike has been received
//! \param[in] time: The time that the spike was received at
//! \param[in] spike: The received spike
void synaptogenesis_spike_received(uint32_t time, spike_t spike);
-//! Print a certain data object
+//! \brief Number of updates to do of synaptogenesis this time step
+//! \return The number of updates to do this time step
+uint32_t synaptogenesis_n_updates(void);
+
+//! \brief Print a certain data object
void print_post_to_pre_entry(void);
#endif // _SYNAPTOGENESIS_DYNAMICS_H_
diff --git a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c
index 8edba3eab3..78bb72518a 100644
--- a/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c
+++ b/neural_modelling/src/neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c
@@ -42,15 +42,11 @@ bool synaptogenesis_row_restructure(
return false;
}
-int32_t synaptogenesis_rewiring_period(void) {
- return -1;
-}
-
-bool synaptogenesis_is_fast(void) {
- return false;
+void synaptogenesis_spike_received(UNUSED uint32_t time, UNUSED spike_t spike) {
}
-void synaptogenesis_spike_received(UNUSED uint32_t time, UNUSED spike_t spike) {
+uint32_t synaptogenesis_n_updates(void) {
+ return 0;
}
void print_post_to_pre_entry(void) {
diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h
index 979a0fbe04..0a627dd65f 100644
--- a/neural_modelling/src/neuron/synapse_row.h
+++ b/neural_modelling/src/neuron/synapse_row.h
@@ -96,15 +96,6 @@
#define SYNAPSE_WEIGHT_BITS 16
#endif
-//! how many bits the synapse delay will take
-#ifndef SYNAPSE_DELAY_BITS
-#define SYNAPSE_DELAY_BITS 4
-#endif
-
-// Create some masks based on the number of bits
-//! the mask for the synapse delay in the row
-#define SYNAPSE_DELAY_MASK ((1 << SYNAPSE_DELAY_BITS) - 1)
-
#ifdef SYNAPSE_WEIGHTS_SIGNED
//! Define the type of the weights
typedef __int_t(SYNAPSE_WEIGHT_BITS) weight_t;
@@ -229,8 +220,8 @@ static inline index_t synapse_row_sparse_type_index(
//! Number of bits for the synapse type and index (depends on type)
//! \return the delay
static inline index_t synapse_row_sparse_delay(
- uint32_t x, uint32_t synapse_type_index_bits) {
- return (x >> synapse_type_index_bits) & SYNAPSE_DELAY_MASK;
+ uint32_t x, uint32_t synapse_type_index_bits, uint32_t synapse_delay_mask) {
+ return (x >> synapse_type_index_bits) & synapse_delay_mask;
}
//! \brief Get the weight from an encoded synapse descriptor
@@ -240,4 +231,73 @@ static inline weight_t synapse_row_sparse_weight(uint32_t x) {
return x >> (32 - SYNAPSE_WEIGHT_BITS);
}
+//! \brief Converts a weight stored in a synapse row to an input
+//! \param[in] weight: the weight to convert in synapse-row form
+//! \param[in] left_shift: the shift to use when decoding
+//! \return the actual input weight for the model
+static inline input_t synapse_row_convert_weight_to_input(
+ weight_t weight, uint32_t left_shift) {
+ union {
+ int_k_t input_type;
+ s1615 output_type;
+ } converter;
+
+ converter.input_type = (int_k_t) (weight) << left_shift;
+
+ return converter.output_type;
+}
+
+//! \brief Get the index of the ring buffer for a given timestep, synapse type
+//! and neuron index
+//! \param[in] simulation_timestep:
+//! \param[in] synapse_type_index:
+//! \param[in] neuron_index:
+//! \param[in] synapse_type_index_bits:
+//! \param[in] synapse_index_bits:
+//! \return Index into the ring buffer
+static inline index_t synapse_row_get_ring_buffer_index(
+ uint32_t simulation_timestep, uint32_t synapse_type_index,
+ uint32_t neuron_index, uint32_t synapse_type_index_bits,
+ uint32_t synapse_index_bits, uint32_t synapse_delay_mask) {
+ return ((simulation_timestep & synapse_delay_mask) << synapse_type_index_bits)
+ | (synapse_type_index << synapse_index_bits)
+ | neuron_index;
+}
+
+//! \brief Get the index of the ring buffer for time 0, synapse type
+//! and neuron index
+//! \param[in] synapse_type_index:
+//! \param[in] neuron_index:
+//! \param[in] synapse_index_bits:
+//! \return Index into the ring buffer
+static inline index_t synapse_row_get_ring_buffer_index_time_0(
+ uint32_t synapse_type_index, uint32_t neuron_index,
+ uint32_t synapse_index_bits) {
+ return (synapse_type_index << synapse_index_bits) | neuron_index;
+}
+
+//! \brief Get the index of the first ring buffer for a given timestep
+//! \param[in] simulation_timestep:
+//! \param[in] synapse_type_index_bits:
+//! \return Index into the ring buffer
+static inline index_t synapse_row_get_first_ring_buffer_index(
+ uint32_t simulation_timestep, uint32_t synapse_type_index_bits,
+ int32_t synapse_delay_mask) {
+ return (simulation_timestep & synapse_delay_mask) << synapse_type_index_bits;
+}
+
+//! \brief Get the index of the ring buffer for a given timestep and combined
+//! synapse type and neuron index (as stored in a synapse row)
+//! \param[in] simulation_timestep:
+//! \param[in] combined_synapse_neuron_index:
+//! \param[in] synapse_type_index_bits:
+//! \return Index into the ring buffer
+static inline index_t synapse_row_get_ring_buffer_index_combined(
+ uint32_t simulation_timestep,
+ uint32_t combined_synapse_neuron_index,
+ uint32_t synapse_type_index_bits, uint32_t synapse_delay_mask) {
+ return ((simulation_timestep & synapse_delay_mask) << synapse_type_index_bits)
+ | combined_synapse_neuron_index;
+}
+
#endif // SYNAPSE_ROW_H
diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c
index 545dd02a04..c8d1a5840d 100644
--- a/neural_modelling/src/neuron/synapses.c
+++ b/neural_modelling/src/neuron/synapses.c
@@ -46,6 +46,9 @@ static weight_t *ring_buffers;
//! Ring buffer size
static uint32_t ring_buffer_size;
+//! Ring buffer mask
+static uint32_t ring_buffer_mask;
+
//! Amount to left shift the ring buffer by to make it an input
static uint32_t *ring_buffer_to_input_left_shifts;
@@ -54,25 +57,31 @@ static uint32_t *ring_buffer_to_input_left_shifts;
//! ```
//! synapse_index_bits + synapse_type_bits
//! ```
-static uint32_t synapse_type_index_bits;
+uint32_t synapse_type_index_bits;
//! \brief Mask to pick out the synapse type and index.
//! \details
//! ```
//! synapse_index_mask | synapse_type_mask
//! ```
-static uint32_t synapse_type_index_mask;
+uint32_t synapse_type_index_mask;
//! Number of bits in the synapse index
-static uint32_t synapse_index_bits;
+uint32_t synapse_index_bits;
//! Mask to pick out the synapse index.
-static uint32_t synapse_index_mask;
+uint32_t synapse_index_mask;
//! Number of bits in the synapse type
-static uint32_t synapse_type_bits;
+uint32_t synapse_type_bits;
//! Mask to pick out the synapse type.
-static uint32_t synapse_type_mask;
+uint32_t synapse_type_mask;
+//! Number of bits in the delay
+uint32_t synapse_delay_bits;
+//! Mask to pick out the delay
+uint32_t synapse_delay_mask;
//! Count of the number of times the ring buffers have saturated
uint32_t synapses_saturation_count = 0;
+static uint32_t n_neurons_peak;
+
/* PRIVATE FUNCTIONS */
@@ -117,10 +126,11 @@ static inline void print_synaptic_row(synaptic_row_t synaptic_row) {
synapses_print_weight(synapse_row_sparse_weight(synapse),
ring_buffer_to_input_left_shifts[synapse_type]);
io_printf(IO_BUF, "nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n",
- synapse_row_sparse_delay(synapse, synapse_type_index_bits),
+ synapse_row_sparse_delay(synapse, synapse_type_index_bits,
+ synapse_delay_mask),
get_type_char(synapse_type),
synapse_row_sparse_index(synapse, synapse_index_mask),
- SYNAPSE_DELAY_MASK, synapse_type_index_bits);
+ synapse_delay_mask, synapse_type_index_bits);
}
// If there's a plastic region
@@ -146,10 +156,10 @@ static inline void print_ring_buffers(uint32_t time) {
for (uint32_t n = 0; n < n_neurons; n++) {
for (uint32_t t = 0; t < n_synapse_types; t++) {
// Determine if this row can be omitted
- for (uint32_t d = 0; d < (1 << SYNAPSE_DELAY_BITS); d++) {
- if (ring_buffers[synapses_get_ring_buffer_index(
+ for (uint32_t d = 0; d < (1 << synapse_delay_bits); d++) {
+ if (ring_buffers[synapse_row_get_ring_buffer_index(
d + time, t, n, synapse_type_index_bits,
- synapse_index_bits)] != 0) {
+ synapse_index_bits, synapse_delay_mask)] != 0) {
goto doPrint;
}
}
@@ -157,11 +167,11 @@ static inline void print_ring_buffers(uint32_t time) {
doPrint:
// Have to print the row
io_printf(IO_BUF, "%3d(%s):", n, get_type_char(t));
- for (uint32_t d = 0; d < (1 << SYNAPSE_DELAY_BITS); d++) {
+ for (uint32_t d = 0; d < (1 << synapse_delay_bits); d++) {
io_printf(IO_BUF, " ");
- uint32_t ring_buffer_index = synapses_get_ring_buffer_index(
+ uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index(
d + time, t, n, synapse_type_index_bits,
- synapse_index_bits);
+ synapse_index_bits, synapse_delay_mask);
synapses_print_weight(ring_buffers[ring_buffer_index],
ring_buffer_to_input_left_shifts[t]);
}
@@ -172,45 +182,34 @@ static inline void print_ring_buffers(uint32_t time) {
#endif // LOG_LEVEL >= LOG_DEBUG
}
-//! \brief Print the neuron inputs.
-//! \details Only does anything when debugging.
-static inline void print_inputs(void) {
-#if LOG_LEVEL >= LOG_DEBUG
- log_debug("Inputs");
- neuron_print_inputs();
-#endif // LOG_LEVEL >= LOG_DEBUG
-}
-
//! \brief The "inner loop" of the neural simulation.
//! \details Every spike event could cause up to 256 different weights to
//! be put into the ring buffer.
//! \param[in] fixed_region: The fixed region of the synaptic matrix
//! \param[in] time: The current simulation time
-static inline void process_fixed_synapses(
+static inline bool process_fixed_synapses(
synapse_row_fixed_part_t *fixed_region, uint32_t time) {
uint32_t *synaptic_words = synapse_row_fixed_weight_controls(fixed_region);
uint32_t fixed_synapse = synapse_row_num_fixed_synapses(fixed_region);
num_fixed_pre_synaptic_events += fixed_synapse;
+ // Pre-mask the time
+ uint32_t masked_time = (time & synapse_delay_mask) << synapse_type_index_bits;
+
for (; fixed_synapse > 0; fixed_synapse--) {
// Get the next 32 bit word from the synaptic_row
// (should auto increment pointer in single instruction)
uint32_t synaptic_word = *synaptic_words++;
- // Extract components from this word
- uint32_t delay =
- synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits);
- uint32_t combined_synapse_neuron_index = synapse_row_sparse_type_index(
- synaptic_word, synapse_type_index_mask);
+ // The ring buffer index can be found by adding on the time to the delay
+ // in the synaptic word directly, and then masking off the whole index.
+ // The addition of the masked time to the delay even with the mask might
+ // overflow into the weight at worst but can't affect the lower bits.
+ uint32_t ring_buffer_index = (synaptic_word + masked_time) & ring_buffer_mask;
uint32_t weight = synapse_row_sparse_weight(synaptic_word);
- // Convert into ring buffer offset
- uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined(
- delay + time, combined_synapse_neuron_index,
- synapse_type_index_bits);
-
// Add weight to current ring buffer value
uint32_t accumulation = ring_buffers[ring_buffer_index] + weight;
@@ -227,6 +226,7 @@ static inline void process_fixed_synapses(
// Store saturated value back in ring-buffer
ring_buffers[ring_buffer_index] = accumulation;
}
+ return true;
}
//! Print output debug data on the synapses
@@ -239,15 +239,38 @@ static inline void print_synapse_parameters(void) {
#endif // LOG_LEVEL >= LOG_DEBUG
}
+//! The layout of the synapse parameters region
+struct synapse_params {
+ uint32_t n_neurons;
+ uint32_t n_synapse_types;
+ uint32_t log_n_neurons;
+ uint32_t log_n_synapse_types;
+ uint32_t log_max_delay;
+ uint32_t drop_late_packets;
+ uint32_t incoming_spike_buffer_size;
+ uint32_t ring_buffer_shifts[];
+};
+
/* INTERFACE FUNCTIONS */
bool synapses_initialise(
- address_t synapse_params_address, uint32_t n_neurons_value,
- uint32_t n_synapse_types_value,
+ address_t synapse_params_address,
+ uint32_t *n_neurons_out, uint32_t *n_synapse_types_out,
+ weight_t **ring_buffers_out,
uint32_t **ring_buffer_to_input_buffer_left_shifts,
- bool* clear_input_buffers_of_late_packets_init) {
+ bool* clear_input_buffers_of_late_packets_init,
+ uint32_t *incoming_spike_buffer_size) {
log_debug("synapses_initialise: starting");
- n_neurons = n_neurons_value;
- n_synapse_types = n_synapse_types_value;
+ struct synapse_params *params = (struct synapse_params *) synapse_params_address;
+ *clear_input_buffers_of_late_packets_init = params->drop_late_packets;
+ *incoming_spike_buffer_size = params->incoming_spike_buffer_size;
+ n_neurons = params->n_neurons;
+ *n_neurons_out = n_neurons;
+ n_synapse_types = params->n_synapse_types;
+ *n_synapse_types_out = n_synapse_types;
+
+ uint32_t log_n_neurons = params->log_n_neurons;
+ uint32_t log_n_synapse_types = params->log_n_synapse_types;
+ uint32_t log_max_delay = params->log_max_delay;
// Set up ring buffer left shifts
ring_buffer_to_input_left_shifts =
@@ -257,15 +280,9 @@ bool synapses_initialise(
return false;
}
- // read bool flag about dropping packets that arrive too late
- *clear_input_buffers_of_late_packets_init = synapse_params_address[0];
-
- // shift read by 1 word.
- synapse_params_address += 1;
-
// read in ring buffer to input left shifts
spin1_memcpy(
- ring_buffer_to_input_left_shifts, synapse_params_address,
+ ring_buffer_to_input_left_shifts, params->ring_buffer_shifts,
n_synapse_types * sizeof(uint32_t));
*ring_buffer_to_input_buffer_left_shifts =
ring_buffer_to_input_left_shifts;
@@ -273,24 +290,22 @@ bool synapses_initialise(
log_debug("synapses_initialise: completed successfully");
print_synapse_parameters();
- uint32_t n_neurons_power_2 = n_neurons;
- uint32_t log_n_neurons = 1;
- if (n_neurons != 1) {
- if (!is_power_of_2(n_neurons)) {
- n_neurons_power_2 = next_power_of_2(n_neurons);
- }
- log_n_neurons = ilog_2(n_neurons_power_2);
- }
- uint32_t n_synapse_types_power_2 = n_synapse_types;
- if (!is_power_of_2(n_synapse_types)) {
- n_synapse_types_power_2 = next_power_of_2(n_synapse_types);
- }
- uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2);
+ synapse_type_index_bits = log_n_neurons + log_n_synapse_types;
+ synapse_type_index_mask = (1 << synapse_type_index_bits) - 1;
+ synapse_index_bits = log_n_neurons;
+ synapse_index_mask = (1 << synapse_index_bits) - 1;
+ synapse_type_bits = log_n_synapse_types;
+ synapse_type_mask = (1 << log_n_synapse_types) - 1;
+ synapse_delay_bits = log_max_delay;
+ synapse_delay_mask = (1 << synapse_delay_bits) - 1;
+
+ n_neurons_peak = 1 << log_n_neurons;
uint32_t n_ring_buffer_bits =
- log_n_neurons + log_n_synapse_types + SYNAPSE_DELAY_BITS;
+ log_n_neurons + log_n_synapse_types + synapse_delay_bits;
ring_buffer_size = 1 << (n_ring_buffer_bits);
+ ring_buffer_mask = ring_buffer_size - 1;
ring_buffers = spin1_malloc(ring_buffer_size * sizeof(weight_t));
if (ring_buffers == NULL) {
@@ -301,45 +316,27 @@ bool synapses_initialise(
for (uint32_t i = 0; i < ring_buffer_size; i++) {
ring_buffers[i] = 0;
}
+ *ring_buffers_out = ring_buffers;
+
+ log_info("Ready to process synapses for %u neurons with %u synapse types",
+ n_neurons, n_synapse_types);
- synapse_type_index_bits = log_n_neurons + log_n_synapse_types;
- synapse_type_index_mask = (1 << synapse_type_index_bits) - 1;
- synapse_index_bits = log_n_neurons;
- synapse_index_mask = (1 << synapse_index_bits) - 1;
- synapse_type_bits = log_n_synapse_types;
- synapse_type_mask = (1 << log_n_synapse_types) - 1;
return true;
}
-void synapses_do_timestep_update(timer_t time) {
- print_ring_buffers(time);
-
- // Transfer the input from the ring buffers into the input buffers
- for (uint32_t neuron_index = 0; neuron_index < n_neurons;
- neuron_index++) {
- // Loop through all synapse types
- for (uint32_t synapse_type_index = 0;
- synapse_type_index < n_synapse_types; synapse_type_index++) {
- // Get index in the ring buffers for the current time slot for
- // this synapse type and neuron
- uint32_t ring_buffer_index = synapses_get_ring_buffer_index(
- time, synapse_type_index, neuron_index,
- synapse_type_index_bits, synapse_index_bits);
-
- // Convert ring-buffer entry to input and add on to correct
- // input for this synapse type and neuron
- neuron_add_inputs(
- synapse_type_index, neuron_index,
- synapses_convert_weight_to_input(
- ring_buffers[ring_buffer_index],
- ring_buffer_to_input_left_shifts[synapse_type_index]));
-
- // Clear ring buffer
+void synapses_flush_ring_buffers(timer_t time) {
+ uint32_t synapse_index = 0;
+ uint32_t ring_buffer_index = synapse_row_get_first_ring_buffer_index(
+ time, synapse_type_index_bits, synapse_delay_mask);;
+ for (uint32_t s_i = n_synapse_types; s_i > 0; s_i--) {
+ uint32_t neuron_index = 0;
+ for (uint32_t n_i = n_neurons_peak; n_i > 0; n_i--) {
ring_buffers[ring_buffer_index] = 0;
+ ring_buffer_index++;
+ neuron_index++;
}
+ synapse_index++;
}
-
- print_inputs();
}
bool synapses_process_synaptic_row(
@@ -374,9 +371,8 @@ bool synapses_process_synaptic_row(
// **NOTE** this is done after initiating DMA in an attempt
// to hide cost of DMA behind this loop to improve the chance
// that the DMA controller is ready to read next synaptic row afterwards
- process_fixed_synapses(fixed_region, time);
+ return process_fixed_synapses(fixed_region, time);
//}
- return true;
}
uint32_t synapses_get_pre_synaptic_events(void) {
@@ -384,18 +380,12 @@ uint32_t synapses_get_pre_synaptic_events(void) {
synapse_dynamics_get_plastic_pre_synaptic_events());
}
-void synapses_flush_ring_buffers(void) {
- for (uint32_t i = 0; i < ring_buffer_size; i++) {
- ring_buffers[i] = 0;
+void synapses_resume(uint32_t time) {
+ // If the time has been reset to zero then the ring buffers need to be
+ // flushed in case there is a delayed spike left over from a previous run
+ if (time == 0) {
+ for (uint32_t i = 0; i < ring_buffer_size; i++) {
+ ring_buffers[i] = 0;
+ }
}
}
-
-//! \brief Clear DTCM used by synapses
-//! \return true if successful
-bool synapses_shut_down(void) {
- sark_free(ring_buffer_to_input_left_shifts);
- sark_free(ring_buffers);
- num_fixed_pre_synaptic_events = 0;
- synapses_saturation_count = 0;
- return true;
-}
diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h
index a5cfb625f7..06b399068b 100644
--- a/neural_modelling/src/neuron/synapses.h
+++ b/neural_modelling/src/neuron/synapses.h
@@ -21,60 +21,38 @@
#define _SYNAPSES_H_
#include
+#include
#include "synapse_row.h"
-#include "neuron.h"
+
+//! \brief Number of bits needed for the synapse type and index
+//! \details
+//! ```
+//! synapse_index_bits + synapse_type_bits
+//! ```
+extern uint32_t synapse_type_index_bits;
+//! \brief Mask to pick out the synapse type and index.
+//! \details
+//! ```
+//! synapse_index_mask | synapse_type_mask
+//! ```
+extern uint32_t synapse_type_index_mask;
+//! Number of bits in the synapse index
+extern uint32_t synapse_index_bits;
+//! Mask to pick out the synapse index.
+extern uint32_t synapse_index_mask;
+//! Number of bits in the synapse type
+extern uint32_t synapse_type_bits;
+//! Mask to pick out the synapse type.
+extern uint32_t synapse_type_mask;
+//! Number of bits in the delay
+extern uint32_t synapse_delay_bits;
+//! Mask to pick out the delay
+extern uint32_t synapse_delay_mask;
//! Count of the number of times the synapses have saturated their weights.
extern uint32_t synapses_saturation_count;
-//! \brief Get the index of the ring buffer for a given timestep, synapse type
-//! and neuron index
-//! \param[in] simulation_timestep:
-//! \param[in] synapse_type_index:
-//! \param[in] neuron_index:
-//! \param[in] synapse_type_index_bits:
-//! \param[in] synapse_index_bits:
-//! \return Index into the ring buffer
-static inline index_t synapses_get_ring_buffer_index(
- uint32_t simulation_timestep, uint32_t synapse_type_index,
- uint32_t neuron_index, uint32_t synapse_type_index_bits,
- uint32_t synapse_index_bits) {
- return ((simulation_timestep & SYNAPSE_DELAY_MASK) << synapse_type_index_bits)
- | (synapse_type_index << synapse_index_bits)
- | neuron_index;
-}
-
-//! \brief Get the index of the ring buffer for a given timestep and combined
-//! synapse type and neuron index (as stored in a synapse row)
-//! \param[in] simulation_timestep:
-//! \param[in] combined_synapse_neuron_index:
-//! \param[in] synapse_type_index_bits:
-//! \return Index into the ring buffer
-static inline index_t synapses_get_ring_buffer_index_combined(
- uint32_t simulation_timestep,
- uint32_t combined_synapse_neuron_index,
- uint32_t synapse_type_index_bits) {
- return ((simulation_timestep & SYNAPSE_DELAY_MASK) << synapse_type_index_bits)
- | combined_synapse_neuron_index;
-}
-
-//! \brief Converts a weight stored in a synapse row to an input
-//! \param[in] weight: the weight to convert in synapse-row form
-//! \param[in] left_shift: the shift to use when decoding
-//! \return the actual input weight for the model
-static inline input_t synapses_convert_weight_to_input(
- weight_t weight, uint32_t left_shift) {
- union {
- int_k_t input_type;
- s1615 output_type;
- } converter;
-
- converter.input_type = (int_k_t) (weight) << left_shift;
-
- return converter.output_type;
-}
-
//! \brief Print the weight of a synapse
//! \param[in] weight: the weight to print in synapse-row form
//! \param[in] left_shift: the shift to use when decoding
@@ -82,7 +60,7 @@ static inline void synapses_print_weight(
weight_t weight, uint32_t left_shift) {
if (weight != 0) {
io_printf(IO_BUF, "%12.6k",
- synapses_convert_weight_to_input(weight, left_shift));
+ synapse_row_convert_weight_to_input(weight, left_shift));
} else {
io_printf(IO_BUF, " ");
}
@@ -90,21 +68,24 @@ static inline void synapses_print_weight(
//! \brief Initialise the synapse processing
//! \param[in] synapse_params_address: Synapse configuration in SDRAM
-//! \param[in] n_neurons: Number of neurons to simulate
-//! \param[in] n_synapse_types: Number of synapse types
+//! \param[out] n_neurons: Number of neurons that will be simulated
+//! \param[out] n_synapse_types: Number of synapse types that will be simulated
+//! \param[out] ring_buffers: The ring buffers that will be used
//! \param[out] ring_buffer_to_input_buffer_left_shifts:
//! Array of shifts to use when converting from ring buffer values to input
//! buffer values
+//! \param[out] clear_input_buffers_of_late_packets:
+//! Inicates whether to clear the input buffers each time step
+//! \param[out] incoming_spike_buffer_size:
+//! The number of spikes to support in the incoming spike circular buffer
//! \return True if successfully initialised. False otherwise.
bool synapses_initialise(
address_t synapse_params_address,
- uint32_t n_neurons, uint32_t n_synapse_types,
+ uint32_t *n_neurons, uint32_t *n_synapse_types,
+ weight_t **ring_buffers,
uint32_t **ring_buffer_to_input_buffer_left_shifts,
- bool* clear_input_buffers_of_late_packets_init);
-
-//! \brief Do all the synapse processing for a timestep.
-//! \param[in] time: the current simulation time
-void synapses_do_timestep_update(timer_t time);
+ bool* clear_input_buffers_of_late_packets_init,
+ uint32_t *incoming_spike_buffer_size);
//! \brief process a synaptic row
//! \param[in] time: the simulated time
@@ -120,11 +101,12 @@ bool synapses_process_synaptic_row(
//! \return the counter for plastic and fixed pre synaptic events or 0
uint32_t synapses_get_pre_synaptic_events(void);
-//! \brief flush the ring buffers
-void synapses_flush_ring_buffers(void);
+//! \brief Resume processing of synapses after a pause
+//! \param[in] time: The time at which the simulation is to start
+void synapses_resume(timer_t time);
-//! \brief allows clearing of DTCM used by synapses
-//! \return true if successful, false otherwise
-bool synapses_shut_down(void);
+//! \brief Reset the ring buffers to 0 at the given time
+//! \param[in] time: the simulated time to reset the buffers at
+void synapses_flush_ring_buffers(timer_t time);
#endif // _SYNAPSES_H_
diff --git a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c
index b2b0cc2568..393acff3a9 100644
--- a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c
+++ b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c
@@ -27,7 +27,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -45,9 +45,6 @@
// ----------------------------------------------------------------------
-//! Spin1 API ticks, to know when the timer wraps
-extern uint ticks;
-
//! data structure for Poisson sources
typedef struct spike_source_t {
//! When the current control regime starts, in timer ticks
@@ -88,6 +85,7 @@ typedef enum region {
PROVENANCE_REGION, //!< provenance region
PROFILER_REGION, //!< profiling region
TDMA_REGION, //!< tdma processing region
+ SDRAM_PARAMS_REGION //!< SDRAM transfer parameters region
} region;
//! The number of recording regions
@@ -109,6 +107,14 @@ typedef enum ssp_callback_priorities {
TIMER = 2
} callback_priorities;
+//! An RNG seed of 4 words
+typedef struct {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+ uint32_t c;
+} rng_seed_t;
+
//! Parameters of the SpikeSourcePoisson
typedef struct global_parameters {
//! True if there is a key to transmit, False otherwise
@@ -129,8 +135,10 @@ typedef struct global_parameters {
uint32_t first_source_id;
//! The number of sources in this sub-population
uint32_t n_spike_sources;
+ //! Maximum expected spikes per tick (for recording)
+ uint32_t max_spikes_per_tick;
//! The seed for the Poisson generation process
- mars_kiss64_seed_t spike_source_seed;
+ rng_seed_t spike_source_seed;
} global_parameters;
//! Structure of the provenance data
@@ -153,6 +161,19 @@ typedef struct source_info {
spike_source_t poissons[];
} source_info;
+//! A region of SDRAM used to transfer synapses
+struct sdram_config {
+ //! The address of the input data to be transferred
+ uint32_t *address;
+ //! The size of the input data to be transferred
+ uint32_t size_in_bytes;
+ //! The offset into the data to write the weights (to account for different
+ //! synapse types)
+ uint32_t offset;
+ //! The weight to send for each active Poisson source
+ uint16_t weights[];
+};
+
//! Array of pointers to sequences of rate data
static source_info **source_data;
@@ -187,6 +208,67 @@ static uint32_t spike_buffer_size;
//! The timer period
static uint32_t timer_period;
+//! Where synaptic input is to be written
+static struct sdram_config *sdram_inputs;
+
+//! The inputs to be sent at the end of this timestep
+static uint16_t *input_this_timestep;
+
+//! \brief Random number generation for the Poisson sources.
+//! This is a local version for speed of operation.
+//! \return A random number
+static inline uint32_t rng(void) {
+ ssp_params.spike_source_seed.x = 314527869 * ssp_params.spike_source_seed.x + 1234567;
+ ssp_params.spike_source_seed.y ^= ssp_params.spike_source_seed.y << 5;
+ ssp_params.spike_source_seed.y ^= ssp_params.spike_source_seed.y >> 7;
+ ssp_params.spike_source_seed.y ^= ssp_params.spike_source_seed.y << 22;
+ uint64_t t = 4294584393ULL * ssp_params.spike_source_seed.z + ssp_params.spike_source_seed.c;
+ ssp_params.spike_source_seed.c = t >> 32;
+ ssp_params.spike_source_seed.z = t;
+
+ return (uint32_t) ssp_params.spike_source_seed.x
+ + ssp_params.spike_source_seed.y + ssp_params.spike_source_seed.z;
+}
+
+//! \brief How many spikes to generate for a fast Poisson source
+//! \param[in] exp_minus_lambda e^(-mean_rate)
+//! \return How many spikes to generate
+static inline uint32_t n_spikes_poisson_fast(UFRACT exp_minus_lambda) {
+ UFRACT p = UFRACT_CONST(1.0);
+ uint32_t k = 0;
+
+ do {
+ k++;
+ // p = p * ulrbits(uni_rng(seed_arg));
+ // Possibly faster multiplication by using DRL's routines
+ p = ulrbits(__stdfix_smul_ulr(bitsulr(p), rng()));
+ } while (bitsulr(p) > bitsulr(exp_minus_lambda));
+ return k - 1;
+}
+
+//! \brief How many time steps until the next spike for a slow Poisson source
+//! \return The number of time steps until the next spike
+static inline REAL n_steps_until_next(void) {
+ REAL A = REAL_CONST(0.0);
+ uint32_t U, U0, USTAR;
+
+ while (true) {
+ U = rng();
+ U0 = U;
+
+ do {
+ USTAR = rng();
+ if (U < USTAR) {
+ return A + (REAL) ulrbits(U0);
+ }
+
+ U = rng();
+ } while (U < USTAR);
+
+ A += 1.0k;
+ }
+}
+
// ----------------------------------------------------------------------
//! \brief Writes the provenance data
@@ -230,11 +312,8 @@ static inline void reset_spikes(void) {
static inline uint32_t slow_spike_source_get_time_to_spike(
uint32_t mean_inter_spike_interval_in_ticks) {
// Round (dist variate * ISI_SCALE_FACTOR), convert to uint32
- int nbits = 15;
uint32_t value = (uint32_t) roundk(
- exponential_dist_variate(
- mars_kiss64_seed, ssp_params.spike_source_seed)
- * ISI_SCALE_FACTOR, nbits);
+ n_steps_until_next() * ISI_SCALE_FACTOR, (15));
// Now multiply by the mean ISI
uint32_t exp_variate = value * mean_inter_spike_interval_in_ticks;
// Note that this will be compared to ISI_SCALE_FACTOR in the main loop!
@@ -253,8 +332,7 @@ static inline uint32_t fast_spike_source_get_num_spikes(
if (bitsulr(exp_minus_lambda) == bitsulr(UFRACT_CONST(0.0))) {
return 0;
}
- return poisson_dist_variate_exp_minus_lambda(
- mars_kiss64_seed, ssp_params.spike_source_seed, exp_minus_lambda);
+ return n_spikes_poisson_fast(exp_minus_lambda);
}
//! \brief Determine how many spikes to transmit this timer tick, for a faster
@@ -266,11 +344,10 @@ static inline uint32_t fast_spike_source_get_num_spikes(
static inline uint32_t faster_spike_source_get_num_spikes(
REAL sqrt_lambda) {
// First we do x = (inv_gauss_cdf(U(0, 1)) * 0.5) + sqrt(lambda)
- REAL x = (gaussian_dist_variate(mars_kiss64_seed, ssp_params.spike_source_seed)
- * HALF) + sqrt_lambda;
+ uint32_t U = rng();
+ REAL x = (norminv_urt(U) * HALF) + sqrt_lambda;
// Then we return int(roundk(x * x))
- int nbits = 15;
- return (uint32_t) roundk(x * x, nbits);
+ return (uint32_t) roundk(x * x, 15);
}
#if LOG_LEVEL >= LOG_DEBUG
@@ -307,10 +384,10 @@ static bool read_global_parameters(global_parameters *sdram_globals) {
log_info("\tkey = %08x, set rate mask = %08x",
ssp_params.key, ssp_params.set_rate_neuron_id_mask);
- log_info("\tseed = %u %u %u %u", ssp_params.spike_source_seed[0],
- ssp_params.spike_source_seed[1],
- ssp_params.spike_source_seed[2],
- ssp_params.spike_source_seed[3]);
+ log_info("\tseed = %u %u %u %u", ssp_params.spike_source_seed.c,
+ ssp_params.spike_source_seed.x,
+ ssp_params.spike_source_seed.y,
+ ssp_params.spike_source_seed.z);
log_info("\tspike sources = %u, starting at %u",
ssp_params.n_spike_sources, ssp_params.first_source_id);
@@ -398,6 +475,33 @@ static bool initialise_recording(data_specification_metadata_t *ds_regions) {
return success;
}
+//! \brief Expand the space for recording spikes.
+//! \param[in] n_spikes: New number of spikes to hold
+static inline void expand_spike_recording_buffer(uint32_t n_spikes) {
+ uint32_t new_size = 8 + (n_spikes * spike_buffer_size);
+ timed_out_spikes *new_spikes = spin1_malloc(new_size);
+ if (new_spikes == NULL) {
+ log_error("Cannot reallocate spike buffer");
+ rt_error(RTE_SWERR);
+ }
+
+ // bzero the new buffer
+ uint32_t *data = (uint32_t *) new_spikes;
+ for (uint32_t n = new_size >> 2; n > 0; n--) {
+ data[n - 1] = 0;
+ }
+
+ // Copy over old buffer if we have it
+ if (spikes != NULL) {
+ spin1_memcpy(new_spikes, spikes,
+ 8 + n_spike_buffers_allocated * spike_buffer_size);
+ sark_free(spikes);
+ }
+
+ spikes = new_spikes;
+ n_spike_buffers_allocated = n_spikes;
+}
+
//! \brief Initialise the model by reading in the regions and checking
//! recording data.
//! \return Whether it successfully read all the regions and set up
@@ -457,11 +561,38 @@ static bool initialize(void) {
n_spike_buffers_allocated = 0;
n_spike_buffer_words = get_bit_field_size(ssp_params.n_spike_sources);
spike_buffer_size = n_spike_buffer_words * sizeof(uint32_t);
+ expand_spike_recording_buffer(ssp_params.max_spikes_per_tick);
// Setup profiler
profiler_init(
data_specification_get_region(PROFILER_REGION, ds_regions));
+ // Setup SDRAM transfer
+ struct sdram_config *sdram_conf = data_specification_get_region(
+ SDRAM_PARAMS_REGION, ds_regions);
+ uint32_t sdram_inputs_size = sizeof(struct sdram_config) + (
+ ssp_params.n_spike_sources * sizeof(uint16_t));
+ sdram_inputs = spin1_malloc(sdram_inputs_size);
+ if (sdram_inputs == NULL) {
+ log_error("Could not allocate %d bytes for SDRAM inputs",
+ sdram_inputs_size);
+ return false;
+ }
+ spin1_memcpy(sdram_inputs, sdram_conf, sdram_inputs_size);
+ log_info("Writing output to address 0x%08x, size in total %d,"
+ "offset in half-words %d, size to write %d", sdram_inputs->address,
+ sdram_inputs->size_in_bytes, sdram_inputs->offset,
+ ssp_params.n_spike_sources * sizeof(uint16_t));
+ if (sdram_inputs->size_in_bytes != 0) {
+ input_this_timestep = spin1_malloc(sdram_inputs->size_in_bytes);
+ if (input_this_timestep == NULL) {
+ log_error("Could not allocate %d bytes for input this timestep",
+ sdram_inputs->size_in_bytes);
+ return false;
+ }
+ sark_word_set(input_this_timestep, 0, sdram_inputs->size_in_bytes);
+ }
+
log_info("Initialise: completed successfully");
return true;
@@ -527,33 +658,6 @@ static bool store_poisson_parameters(void) {
return true;
}
-//! \brief Expand the space for recording spikes.
-//! \param[in] n_spikes: New number of spikes to hold
-static inline void expand_spike_recording_buffer(uint32_t n_spikes) {
- uint32_t new_size = 8 + (n_spikes * spike_buffer_size);
- timed_out_spikes *new_spikes = spin1_malloc(new_size);
- if (new_spikes == NULL) {
- log_error("Cannot reallocate spike buffer");
- rt_error(RTE_SWERR);
- }
-
- // bzero the new buffer
- uint32_t *data = (uint32_t *) new_spikes;
- for (uint32_t n = new_size >> 2; n > 0; n--) {
- data[n - 1] = 0;
- }
-
- // Copy over old buffer if we have it
- if (spikes != NULL) {
- spin1_memcpy(new_spikes, spikes,
- 8 + n_spike_buffers_allocated * spike_buffer_size);
- sark_free(spikes);
- }
-
- spikes = new_spikes;
- n_spike_buffers_allocated = n_spikes;
-}
-
//! \brief records spikes as needed
//! \param[in] neuron_id: the neurons to store spikes from
//! \param[in] n_spikes: the number of times this neuron has spiked
@@ -623,6 +727,9 @@ static void process_fast_source(
const uint32_t spike_key = ssp_params.key | s_id;
tdma_processing_send_packet(
spike_key, num_spikes, WITH_PAYLOAD, timer_count);
+ } else if (sdram_inputs->address != 0) {
+ input_this_timestep[sdram_inputs->offset + s_id] +=
+ sdram_inputs->weights[s_id] * num_spikes;
}
}
}
@@ -659,6 +766,9 @@ static void process_slow_source(
// Send package
tdma_processing_send_packet(
ssp_params.key | s_id, count, WITH_PAYLOAD, timer_count);
+ } else if (sdram_inputs->address != 0) {
+ input_this_timestep[sdram_inputs->offset + s_id] +=
+ sdram_inputs->weights[s_id] * count;
}
}
@@ -708,6 +818,11 @@ static void timer_callback(uint timer_count, UNUSED uint unused) {
return;
}
+ // Reset the inputs this timestep if using them
+ if (sdram_inputs->address != 0) {
+ sark_word_set(input_this_timestep, 0, sdram_inputs->size_in_bytes);
+ }
+
// Loop through spike sources
tdma_processing_reset_phase();
for (index_t s_id = 0; s_id < ssp_params.n_spike_sources; s_id++) {
@@ -730,6 +845,12 @@ static void timer_callback(uint timer_count, UNUSED uint unused) {
profiler_write_entry_disable_irq_fiq(PROFILER_EXIT | PROFILER_TIMER);
+ // If transferring over SDRAM, transfer now
+ if (sdram_inputs->address != 0) {
+ spin1_dma_transfer(0, sdram_inputs->address, input_this_timestep,
+ DMA_WRITE, sdram_inputs->size_in_bytes);
+ }
+
// Record output spikes if required
if (recording_flags > 0) {
record_spikes(time);
diff --git a/neural_modelling/src/synapse_expander/matrix_generator.c b/neural_modelling/src/synapse_expander/matrix_generator.c
index 2d0edc6068..e028b83377 100644
--- a/neural_modelling/src/synapse_expander/matrix_generator.c
+++ b/neural_modelling/src/synapse_expander/matrix_generator.c
@@ -217,6 +217,13 @@ bool matrix_generator_generate(
max_n_synapses, indices);
log_debug("Generated %u synapses", n_indices);
+ for (uint32_t j = 0; j < n_indices; j++) {
+ if (indices[j] >= post_slice_count) {
+ log_error("Index %u out of bounds for %u neurons", indices[j], post_slice_count);
+ rt_error(RTE_SWERR);
+ }
+ }
+
accum delay_params[n_indices], weight_params[n_indices];
uint16_t delays[n_indices], weights[n_indices];
diff --git a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_static.h b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_static.h
index 28b18c07fa..0251c20727 100644
--- a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_static.h
+++ b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_static.h
@@ -25,6 +25,7 @@
#include
#include "matrix_generator_common.h"
#include
+#include
/**
* \brief How to initialise the static synaptic matrix generator
@@ -53,11 +54,6 @@ static void matrix_generator_static_free(UNUSED void *generator) {
*/
#define SYNAPSE_WEIGHT_MASK 0xFFFF
-/**
- * \brief The mask of a delay before shifting
- */
-#define SYNAPSE_DELAY_MASK 0xFF
-
//! The layout of a purely static row of a synaptic matrix.
typedef struct {
uint32_t plastic_plastic_size; //!< the plastic-plastic size within a row
@@ -74,18 +70,20 @@ typedef struct {
* \param[in] post_index: The core-relative index of the target neuron
* \param[in] synapse_type_bits: The number of bits for the synapse type
* \param[in] synapse_index_bits: The number of bits for the target neuron id
+ * \param[in] delay_bits: The number of bits for the synaptic delay
* \return a synaptic word
*/
static uint32_t build_static_word(
uint16_t weight, uint16_t delay, uint32_t type,
uint16_t post_index, uint32_t synapse_type_bits,
- uint32_t synapse_index_bits) {
+ uint32_t synapse_index_bits, uint32_t delay_bits) {
uint32_t synapse_index_mask = (1 << synapse_index_bits) - 1;
uint32_t synapse_type_mask = (1 << synapse_type_bits) - 1;
+ uint32_t synapse_delay_mask = (1 << delay_bits) - 1;
uint32_t wrd = post_index & synapse_index_mask;
wrd |= (type & synapse_type_mask) << synapse_index_bits;
- wrd |= (delay & SYNAPSE_DELAY_MASK) <<
+ wrd |= (delay & synapse_delay_mask) <<
(synapse_index_bits + synapse_type_bits);
wrd |= (weight & SYNAPSE_WEIGHT_MASK) << SYNAPSE_WEIGHT_SHIFT;
return wrd;
@@ -176,6 +174,15 @@ static void matrix_generator_static_write_row(
log_debug("write[%u] = 0x%08x", i, write_address[i]);
}
+ uint32_t max_delay_power_2 = max_delay_per_stage;
+ uint32_t log_max_delay = 1;
+ if (max_delay_power_2 != 1) {
+ if (!is_power_of_2(max_delay_power_2)) {
+ max_delay_power_2 = next_power_of_2(max_delay_power_2);
+ }
+ log_max_delay = ilog_2(max_delay_power_2);
+ }
+
// Go through the synapses
for (uint32_t synapse = 0; synapse < n_synapses; synapse++) {
@@ -205,7 +212,7 @@ static void matrix_generator_static_write_row(
// Build synaptic word
uint32_t word = build_static_word(
weight, delay.delay, synapse_type, post_index, synapse_type_bits,
- synapse_index_bits);
+ synapse_index_bits, log_max_delay);
// Write the word
log_debug("Writing word to 0x%08x", &write_address[delay.stage][0]);
diff --git a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_stdp.h b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_stdp.h
index 21037f65dd..85913942c2 100644
--- a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_stdp.h
+++ b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_stdp.h
@@ -27,11 +27,7 @@
#include
#include "matrix_generator_common.h"
#include
-
-/**
- * \brief The mask for a delay before shifting
- */
-#define SYNAPSE_DELAY_MASK 0xFF
+#include
//! The layout of the initial plastic synapse part of the row
typedef struct {
@@ -89,18 +85,20 @@ void matrix_generator_stdp_free(void *generator) {
* \param[in] post_index: The core-relative index of the target neuron
* \param[in] synapse_type_bits: The number of bits for the synapse type
* \param[in] synapse_index_bits: The number of bits for the target neuron id
+ * \param[in] delay_bits: The number of bits for the synaptic delay
* \return A half-word fixed-plastic synapse
*/
static uint16_t build_fixed_plastic_half_word(
uint16_t delay, uint32_t type,
uint32_t post_index, uint32_t synapse_type_bits,
- uint32_t synapse_index_bits) {
+ uint32_t synapse_index_bits, uint32_t delay_bits) {
uint16_t synapse_index_mask = (1 << synapse_index_bits) - 1;
uint16_t synapse_type_mask = (1 << synapse_type_bits) - 1;
+ uint16_t delay_mask = (1 << delay_bits) - 1;
uint16_t wrd = post_index & synapse_index_mask;
wrd |= (type & synapse_type_mask) << synapse_index_bits;
- wrd |= (delay & SYNAPSE_DELAY_MASK) <<
+ wrd |= (delay & delay_mask) <<
(synapse_index_bits + synapse_type_bits);
// wrd |= (delay & SYNAPSE_DELAY_MASK) << synapse_type_bits;
@@ -264,6 +262,15 @@ void matrix_generator_stdp_write_row(
}
}
+ uint32_t max_delay_power_2 = max_delay_per_stage;
+ uint32_t log_max_delay = 1;
+ if (max_delay_power_2 != 1) {
+ if (!is_power_of_2(max_delay_power_2)) {
+ max_delay_power_2 = next_power_of_2(max_delay_power_2);
+ }
+ log_max_delay = ilog_2(max_delay_power_2);
+ }
+
// Write the fixed-plastic part of the row
for (uint32_t synapse = 0; synapse < n_synapses; synapse++) {
// Post-neuron index
@@ -275,7 +282,7 @@ void matrix_generator_stdp_write_row(
// Build synaptic word
uint16_t fp_half_word = build_fixed_plastic_half_word(
delay.delay, synapse_type, post_index, synapse_type_bits,
- synapse_index_bits);
+ synapse_index_bits, log_max_delay);
// Write the half-word
*fp_address[delay.stage]++ = fp_half_word;
diff --git a/neural_modelling/src/synapse_expander/synapse_expander.c b/neural_modelling/src/synapse_expander/synapse_expander.c
index 318d637ad3..286efa2bfa 100644
--- a/neural_modelling/src/synapse_expander/synapse_expander.c
+++ b/neural_modelling/src/synapse_expander/synapse_expander.c
@@ -21,7 +21,6 @@
* \file
* \brief The synapse expander for neuron cores
*/
-#include
#include "matrix_generator.h"
#include "connection_generator.h"
#include "param_generator.h"
@@ -55,6 +54,7 @@ struct connection_builder_config {
//! The configuration of the synapse expander
struct expander_config {
+ uint32_t synaptic_matrix_region;
uint32_t n_in_edges;
uint32_t post_slice_start;
uint32_t post_slice_count;
@@ -153,13 +153,13 @@ static bool read_connection_builder_region(address_t *in_region,
/**
* \brief Read the data for the expander
+ * \param[in] ds_regions: The data specification regions
* \param[in] params_address: The address of the expander parameters
- * \param[in] synaptic_matrix_region: The address of the synaptic matrices
* \return True if the expander finished correctly, False if there was an
* error
*/
-static bool run_synapse_expander(
- address_t params_address, address_t synaptic_matrix_region) {
+static bool run_synapse_expander(data_specification_metadata_t *ds_regions,
+ address_t params_address) {
// Read in the global parameters
struct expander_config config;
fast_memcpy(&config, params_address, sizeof(config));
@@ -173,6 +173,10 @@ static bool run_synapse_expander(
sizeof(unsigned long accum) * config.n_synapse_types);
params_address += 2 * config.n_synapse_types;
+ // Get the synaptic matrix region
+ address_t synaptic_matrix_region = data_specification_get_region(
+ config.synaptic_matrix_region, ds_regions);
+
// Go through each connector and generate
for (uint32_t edge = 0; edge < config.n_in_edges; edge++) {
if (!read_connection_builder_region(
@@ -191,20 +195,22 @@ static bool run_synapse_expander(
void c_main(void) {
sark_cpu_state(CPU_STATE_RUN);
- // Get the addresses of the regions
log_info("Starting To Build Connectors");
+
+ // Get pointer to 1st virtual processor info struct in SRAM and get USER1;
+ // This is the ID of the connection builder region from which to read the
+ // rest of the data
+ vcpu_t *virtual_processor_table = (vcpu_t*) SV_VCPU;
+ uint user1 = virtual_processor_table[spin1_get_core_id()].user1;
+
+ // Get the addresses of the regions
data_specification_metadata_t *ds_regions =
data_specification_get_data_address();
- address_t params_address = data_specification_get_region(
- CONNECTOR_BUILDER_REGION, ds_regions);
- address_t syn_mtx_addr = data_specification_get_region(
- SYNAPTIC_MATRIX_REGION, ds_regions);
- log_info("\tReading SDRAM at 0x%08x, writing to matrix at 0x%08x",
- params_address, syn_mtx_addr);
+ address_t params_address = data_specification_get_region(user1, ds_regions);
+ log_info("\tReading SDRAM at 0x%08x", params_address);
// Run the expander
- if (!run_synapse_expander(
- (address_t) params_address, (address_t) syn_mtx_addr)) {
+ if (!run_synapse_expander(ds_regions, params_address)) {
log_info("!!! Error reading SDRAM data !!!");
rt_error(RTE_ABORT);
}
diff --git a/spynnaker/pyNN/extra_algorithms/delay_support_adder.py b/spynnaker/pyNN/extra_algorithms/delay_support_adder.py
index 56817a8f17..2b525bf9d3 100644
--- a/spynnaker/pyNN/extra_algorithms/delay_support_adder.py
+++ b/spynnaker/pyNN/extra_algorithms/delay_support_adder.py
@@ -13,6 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import logging
+import math
from spinn_utilities.log import FormatAdapter
from spinn_utilities.progress_bar import ProgressBar
@@ -54,9 +55,9 @@ class DelaySupportAdder(object):
"does not have accepts_edges_from_delay_vertex turned off.")
NOT_SUPPORTED_DELAY_ERROR_MSG = (
- "The maximum delay {} for projection {} is not supported "
- "by the splitter {} (max supported delay of the splitter is {} and "
- "a delay extension can add {} extra delay). either reduce "
+ "The maximum delay {}ms for projection {} is not supported "
+ "by the splitter {} (max supported delay of the splitter is {}ms and "
+ "a delay extension can add {}ms extra delay). either reduce "
"the delay, or use a splitter which supports a larger delay, or "
"finally implement the code to allow multiple delay extensions. "
"good luck.")
@@ -93,7 +94,7 @@ def __call__(self, app_graph, user_max_delay):
# figure the max delay and if we need a delay extension
synapse_infos = app_edge.synapse_information
- (max_delay_needed, post_vertex_max_delay,
+ (n_delay_stages, delay_steps_per_stage,
need_delay_extension) = self._check_delay_values(
app_edge, user_max_delay, synapse_infos)
@@ -102,14 +103,8 @@ def __call__(self, app_graph, user_max_delay):
delay_app_vertex = (
self._create_delay_app_vertex_and_pre_edge(
app_outgoing_edge_partition, app_edge,
- post_vertex_max_delay, app_graph,
- max_delay_needed))
-
- # update the delay extension for the max delay slots.
- # NOTE do it accumulately. coz else more loops.
- delay_app_vertex. \
- set_new_n_delay_stages_and_delay_per_stage(
- post_vertex_max_delay, max_delay_needed)
+ delay_steps_per_stage, app_graph,
+ n_delay_stages))
# add the edge from the delay extension to the
# dest vertex
@@ -155,8 +150,8 @@ def _create_post_delay_edge(self, delay_app_vertex, app_edge):
app_edge.delay_edge = delay_edge
def _create_delay_app_vertex_and_pre_edge(
- self, app_outgoing_edge_partition, app_edge, post_vertex_max_delay,
- app_graph, max_delay_needed):
+ self, app_outgoing_edge_partition, app_edge, delay_per_stage,
+ app_graph, n_delay_stages):
""" creates the delay extension app vertex and the edge from the src\
vertex to this delay extension. Adds to the graph, as safe to do\
so.
@@ -164,8 +159,8 @@ def _create_delay_app_vertex_and_pre_edge(
:param OutgoingEdgePartition app_outgoing_edge_partition:
the original outgoing edge partition.
:param AppEdge app_edge: the undelayed app edge.
- :param int post_vertex_max_delay: delay supported by post vertex.
- :param int max_delay_needed: the max delay needed by this app edge.
+ :param int delay_per_stage: delay for each delay stage
+ :param int n_delay_stages: the number of delay stages needed
:param ApplicationGraph app_graph: the app graph.
:return: the DelayExtensionAppVertex
"""
@@ -177,9 +172,8 @@ def _create_delay_app_vertex_and_pre_edge(
# build delay app vertex
delay_name = "{}_delayed".format(app_edge.pre_vertex.label)
delay_app_vertex = DelayExtensionVertex(
- app_edge.pre_vertex.n_atoms, post_vertex_max_delay,
- max_delay_needed - post_vertex_max_delay, app_edge.pre_vertex,
- label=delay_name)
+ app_edge.pre_vertex.n_atoms, delay_per_stage, n_delay_stages,
+ app_edge.pre_vertex, label=delay_name)
# set trackers
delay_app_vertex.splitter = (
@@ -194,6 +188,9 @@ def _create_delay_app_vertex_and_pre_edge(
label="{}_to_DelayExtension".format(
app_edge.pre_vertex.label))
self._delay_pre_edges.append(delay_pre_edge)
+ else:
+ delay_app_vertex.set_new_n_delay_stages_and_delay_per_stage(
+ n_delay_stages, delay_per_stage)
return delay_app_vertex
def _check_delay_values(
@@ -204,17 +201,17 @@ def _check_delay_values(
:param ApplicationEdge app_edge: the undelayed app edge
:param int user_max_delay: user max delay of the sim.
:param iterable[SynapseInfo] synapse_infos: iterable of synapse infos
- :return:tuple of max_delay_needed, post_vertex_max_delay, bool.
+ :return: tuple(n_delay_stages, delay_steps_per_stage, extension_needed)
"""
# get max delay required
- max_delay_needed = max(
+ max_delay_needed_ms = max(
synapse_info.synapse_dynamics.get_delay_maximum(
synapse_info.connector, synapse_info)
for synapse_info in synapse_infos)
# check max delay works
- if max_delay_needed > user_max_delay:
+ if max_delay_needed_ms > user_max_delay:
logger.warning(self.END_USER_MAX_DELAY_DEFILING_ERROR_MESSAGE)
# get if the post vertex needs a delay extension
@@ -225,13 +222,12 @@ def _check_delay_values(
self.INVALID_SPLITTER_FOR_DELAYS_ERROR_MSG.format(
app_edge.post_vertex, post_splitter, app_edge))
- post_vertex_max_delay = (
- app_edge.post_vertex.splitter.max_support_delay() *
- machine_time_step_ms())
+ max_delay_steps = app_edge.post_vertex.splitter.max_support_delay()
+ max_delay_ms = max_delay_steps * machine_time_step_ms()
# if does not need a delay extension, run away
- if post_vertex_max_delay >= max_delay_needed:
- return max_delay_needed, post_vertex_max_delay, False
+ if max_delay_ms >= max_delay_needed_ms:
+ return 0, max_delay_steps, False
# Check post vertex is ok with getting a delay
if not post_splitter.accepts_edges_from_delay_vertex():
@@ -241,18 +237,16 @@ def _check_delay_values(
# needs a delay extension, check can be supported with 1 delay
# extension. coz we dont do more than 1 at the moment
- total_supported_delay = (
- post_vertex_max_delay +
- (DelayExtensionVertex.get_max_delay_ticks_supported(
- post_vertex_max_delay) * machine_time_step_ms()))
- if total_supported_delay < max_delay_needed:
+ ext_provided_ms = (DelayExtensionVertex.get_max_delay_ticks_supported(
+ max_delay_steps) * machine_time_step_ms())
+ total_delay_ms = ext_provided_ms + max_delay_ms
+ if total_delay_ms < max_delay_needed_ms:
raise DelayExtensionException(
self.NOT_SUPPORTED_DELAY_ERROR_MSG.format(
- max_delay_needed, app_edge,
+ max_delay_needed_ms, app_edge,
app_edge.post_vertex.splitter,
- post_vertex_max_delay,
- DelayExtensionVertex.get_max_delay_ticks_supported(
- post_vertex_max_delay)))
+ max_delay_ms, ext_provided_ms))
# return data for building delay extensions
- return max_delay_needed, post_vertex_max_delay, True
+ n_stages = int(math.ceil(max_delay_needed_ms / max_delay_ms)) - 1
+ return n_stages, max_delay_steps, True
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/__init__.py b/spynnaker/pyNN/extra_algorithms/splitter_components/__init__.py
index e73a6692ec..c5b7fa76ef 100644
--- a/spynnaker/pyNN/extra_algorithms/splitter_components/__init__.py
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/__init__.py
@@ -20,9 +20,15 @@
SplitterAbstractPopulationVertexSlice)
from .splitter_delay_vertex_slice import SplitterDelayVertexSlice
from .spynnaker_splitter_slice_legacy import SpynnakerSplitterSliceLegacy
+from .splitter_abstract_pop_vertex_neurons_synapses import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+from .splitter_poisson_delegate import SplitterPoissonDelegate
+from .abstract_supports_one_to_one_sdram_input import (
+ AbstractSupportsOneToOneSDRAMInput)
__all__ = [
'AbstractSpynnakerSplitterDelay', 'SplitterAbstractPopulationVertexSlice',
- 'SplitterDelayVertexSlice',
- 'SpynnakerSplitterPartitioner', 'SpynnakerSplitterSelector',
- 'SpynnakerSplitterSliceLegacy']
+ 'SplitterDelayVertexSlice', 'SpynnakerSplitterPartitioner',
+ 'SpynnakerSplitterSelector', 'SpynnakerSplitterSliceLegacy',
+ 'SplitterAbstractPopulationVertexNeuronsSynapses',
+ 'SplitterPoissonDelegate', 'AbstractSupportsOneToOneSDRAMInput']
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_supports_one_to_one_sdram_input.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_supports_one_to_one_sdram_input.py
new file mode 100644
index 0000000000..b343aefc64
--- /dev/null
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_supports_one_to_one_sdram_input.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from six import add_metaclass
+from spinn_utilities.abstract_base import AbstractBase
+
+
+@add_metaclass(AbstractBase)
+class AbstractSupportsOneToOneSDRAMInput(object):
+ """ A marker interface for a splitter that supports one-to-one input using
+ SDRAM. The splitter is assumed to handle the splitting on any inputs
+ that are actually one-to-one, as it will have to create the vertices
+ """
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py
new file mode 100644
index 0000000000..44ba9214cb
--- /dev/null
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py
@@ -0,0 +1,785 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import math
+import logging
+from collections import defaultdict
+from spinn_utilities.overrides import overrides
+from pacman.exceptions import PacmanConfigurationException
+from pacman.model.resources import (
+ ResourceContainer, DTCMResource, CPUCyclesPerTickResource,
+ MultiRegionSDRAM)
+from pacman.model.partitioner_splitters.abstract_splitters import (
+ AbstractSplitterCommon)
+from pacman.model.graphs.common.slice import Slice
+from pacman.model.graphs.machine import (
+ MachineEdge, SourceSegmentedSDRAMMachinePartition, SDRAMMachineEdge)
+from pacman.utilities.algorithm_utilities.\
+ partition_algorithm_utilities import get_remaining_constraints
+from spinn_front_end_common.utilities.globals_variables import (
+ machine_time_step_ms)
+from spynnaker.pyNN.models.neuron import (
+ PopulationNeuronsMachineVertex, PopulationSynapsesMachineVertexLead,
+ PopulationSynapsesMachineVertexShared, NeuronProvenance, SynapseProvenance,
+ AbstractPopulationVertex, SpikeProcessingFastProvenance)
+from spynnaker.pyNN.models.neuron.population_neurons_machine_vertex import (
+ SDRAM_PARAMS_SIZE as NEURONS_SDRAM_PARAMS_SIZE, NeuronMainProvenance)
+from data_specification.reference_context import ReferenceContext
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ SynapseDynamicsStatic, AbstractSynapseDynamicsStructural)
+from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
+from spinn_front_end_common.utilities.exceptions import ConfigurationException
+from spynnaker.pyNN.models.neuron.population_synapses_machine_vertex_common \
+ import (SDRAM_PARAMS_SIZE as SYNAPSES_SDRAM_PARAMS_SIZE, KEY_CONFIG_SIZE,
+ SynapseRegions)
+from spynnaker.pyNN.utilities.constants import (
+ SYNAPSE_SDRAM_PARTITION_ID, SPIKE_PARTITION_ID)
+from spynnaker.pyNN.models.spike_source import SpikeSourcePoissonVertex
+from spynnaker.pyNN.models.neural_projections.connectors import (
+ OneToOneConnector)
+from spynnaker.pyNN.utilities.utility_calls import get_n_bits
+from spynnaker.pyNN.exceptions import SynapticConfigurationException
+from spynnaker.pyNN.models.neuron.master_pop_table import (
+ MasterPopTableAsBinarySearch)
+from spynnaker.pyNN.utilities.bit_field_utilities import (
+ get_estimated_sdram_for_bit_field_region,
+ get_estimated_sdram_for_key_region,
+ exact_sdram_for_bit_field_builder_region)
+from spynnaker.pyNN.models.neural_projections import DelayedApplicationEdge
+from .splitter_poisson_delegate import SplitterPoissonDelegate
+from .abstract_spynnaker_splitter_delay import AbstractSpynnakerSplitterDelay
+from .abstract_supports_one_to_one_sdram_input import (
+ AbstractSupportsOneToOneSDRAMInput)
+
+logger = logging.getLogger(__name__)
+
+# The maximum number of bits for the ring buffer index that are likely to
+# fit in DTCM (14-bits = 16,384 16-bit ring buffer entries = 32Kb DTCM
+MAX_RING_BUFFER_BITS = 14
+
+
+class SplitterAbstractPopulationVertexNeuronsSynapses(
+ AbstractSplitterCommon, AbstractSpynnakerSplitterDelay,
+ AbstractSupportsOneToOneSDRAMInput):
+ """ Splits an AbstractPopulationVertex so that there are separate neuron
+ cores each being fed by one or more synapse cores. Incoming one-to-one
+ Poisson cores are also added here if they meet the criteria.
+ """
+
+ __slots__ = [
+ # All the neuron cores
+ "__neuron_vertices",
+ # All the synapse cores
+ "__synapse_vertices",
+ # The synapse cores split by neuron core
+ "__synapse_verts_by_neuron",
+ # The number of synapse cores per neuron core
+ "__n_synapse_vertices",
+ # Any application edges from Poisson sources that are handled here
+ "__poisson_edges",
+ # The maximum delay supported
+ "__max_delay",
+ # The user-set maximum delay, for reset
+ "__user_max_delay",
+ # Whether to allow delay extensions to be created
+ "__allow_delay_extension",
+ # The user-set allowing of delay extensions
+ "__user_allow_delay_extension",
+ # The fixed slices the vertices are divided into
+ "__slices",
+ # The next synapse core to use for an incoming machine edge
+ "__next_synapse_index"]
+
+ SPLITTER_NAME = "SplitterAbstractPopulationVertexNeuronsSynapses"
+
+ INVALID_POP_ERROR_MESSAGE = (
+ "The vertex {} cannot be supported by the "
+ "SplitterAbstractPopVertexNeuronsSynapses as"
+ " the only vertex supported by this splitter is a "
+ "AbstractPopulationVertex. Please use the correct splitter for "
+ "your vertex and try again.")
+
+ def __init__(self, n_synapse_vertices=1,
+ max_delay=None,
+ allow_delay_extension=None):
+ """
+
+ :param int n_synapse_vertices:
+ The number of synapse cores per neuron core
+ :param max_delay:
+ The maximum delay supported by each synapse core; by default this
+ is computed based on the number of atoms per core, the number of
+ synapse types, and the space available for delays on the core
+ :type max_delay: int or None
+ :param allow_delay_extension:
+ Whether delay extensions are allowed in the network. If max_delay
+ is provided, this will default to True. If max_delay is not
+ provided, and this is given as None, it will be computed based on
+ whether delay extensions should be needed.
+ :type allow_delay_extension: bool or None
+ """
+ super(SplitterAbstractPopulationVertexNeuronsSynapses, self).__init__(
+ self.SPLITTER_NAME)
+ AbstractSpynnakerSplitterDelay.__init__(self)
+ self.__n_synapse_vertices = n_synapse_vertices
+ self.__max_delay = max_delay
+ self.__user_max_delay = max_delay
+ self.__allow_delay_extension = allow_delay_extension
+ self.__user_allow_delay_extension = allow_delay_extension
+ self.__slices = None
+ self.__next_synapse_index = 0
+
+ if (self.__max_delay is not None and
+ self.__allow_delay_extension is None):
+ self.__allow_delay_extension = True
+
+ @overrides(AbstractSplitterCommon.set_governed_app_vertex)
+ def set_governed_app_vertex(self, app_vertex):
+ AbstractSplitterCommon.set_governed_app_vertex(self, app_vertex)
+ if not isinstance(app_vertex, AbstractPopulationVertex):
+ raise PacmanConfigurationException(
+ self.INVALID_POP_ERROR_MESSAGE.format(app_vertex))
+
+ @overrides(AbstractSplitterCommon.create_machine_vertices)
+ def create_machine_vertices(self, resource_tracker, machine_graph):
+ app_vertex = self._governed_app_vertex
+ label = app_vertex.label
+ constraints = get_remaining_constraints(app_vertex)
+
+ # Structural plasticity can only be run on a single synapse core
+ if (isinstance(app_vertex.synapse_dynamics,
+ AbstractSynapseDynamicsStructural) and
+ self.__n_synapse_vertices != 1):
+ raise SynapticConfigurationException(
+ "The current implementation of structural plasticity can only"
+ " be run on a single synapse core. Please ensure the number"
+ " of synapse cores is set to 1")
+
+ # Do some checks to make sure everything is likely to fit
+ atoms_per_core = min(
+ app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms)
+ n_synapse_types = app_vertex.neuron_impl.get_n_synapse_types()
+ if (get_n_bits(atoms_per_core) + get_n_bits(n_synapse_types) +
+ get_n_bits(self.__get_max_delay)) > MAX_RING_BUFFER_BITS:
+ raise SynapticConfigurationException(
+ "The combination of the number of neurons per core ({}), "
+ "the number of synapse types ({}), and the maximum delay per "
+ "core ({}) will require too much DTCM. Please reduce one or "
+ "more of these values.".format(
+ atoms_per_core, n_synapse_types, self.__get_max_delay))
+
+ self.__neuron_vertices = list()
+ self.__synapse_vertices = list()
+ self.__synapse_verts_by_neuron = defaultdict(list)
+
+ incoming_direct_poisson = self.__handle_poisson_sources(
+ label, machine_graph)
+
+ # Work out the ring buffer shifts based on all incoming things
+ rb_shifts = app_vertex.get_ring_buffer_shifts(
+ app_vertex.incoming_projections)
+ weight_scales = app_vertex.get_weight_scales(rb_shifts)
+
+ # Get resources for synapses
+ independent_synapse_sdram = self.__independent_synapse_sdram()
+ proj_dependent_sdram = self.__proj_dependent_synapse_sdram(
+ app_vertex.incoming_projections)
+
+ for index, vertex_slice in enumerate(self.__get_fixed_slices()):
+
+ # Find the maximum number of cores on any chip available
+ max_crs = resource_tracker.get_maximum_cores_available_on_a_chip()
+ if max_crs < (self.__n_synapse_vertices + 1):
+ raise ConfigurationException(
+ "No chips remaining with enough cores for"
+ f" {self.__n_synapse_vertices} synapse cores and a neuron"
+ " core")
+ max_crs -= self.__n_synapse_vertices + 1
+
+ # Create the neuron vertex for the slice
+ neuron_vertex, neuron_resources = self.__add_neuron_core(
+ vertex_slice, label, index, rb_shifts, weight_scales,
+ machine_graph, constraints)
+
+ # Keep track of synapse vertices for each neuron vertex and
+ # resources used by each core (neuron core is added later)
+ synapse_vertices = list()
+ self.__synapse_verts_by_neuron[neuron_vertex] = synapse_vertices
+ all_resources = []
+
+ # Add the first vertex
+ synapse_references, syn_label = self.__add_lead_synapse_core(
+ vertex_slice, independent_synapse_sdram, proj_dependent_sdram,
+ label, rb_shifts, weight_scales, all_resources, machine_graph,
+ synapse_vertices, neuron_vertex, constraints)
+
+ # Do the remaining synapse cores
+ for i in range(1, self.__n_synapse_vertices):
+ self.__add_shared_synapse_core(
+ syn_label, i, vertex_slice, synapse_references,
+ all_resources, machine_graph, synapse_vertices,
+ neuron_vertex, constraints)
+
+ # Add resources for Poisson vertices up to core limit
+ poisson_vertices = incoming_direct_poisson[vertex_slice]
+ remaining_poisson_vertices = list()
+ added_poisson_vertices = list()
+ for poisson_vertex, poisson_edge in poisson_vertices:
+ if max_crs <= 0:
+ remaining_poisson_vertices.append(poisson_vertex)
+ self.__add_poisson_multicast(
+ poisson_vertex, synapse_vertices, machine_graph,
+ poisson_edge)
+ else:
+ all_resources.append(
+ (poisson_vertex.resources_required, []))
+ added_poisson_vertices.append(poisson_vertex)
+ max_crs -= 1
+
+ if remaining_poisson_vertices:
+ logger.warn(
+ f"Vertex {label} is using multicast for"
+ f" {len(remaining_poisson_vertices)} one-to-one Poisson"
+ " sources as not enough cores exist to put them on the"
+ " same chip")
+
+ # Create an SDRAM edge partition
+ sdram_label = "SDRAM {} Synapses-->Neurons:{}-{}".format(
+ label, vertex_slice.lo_atom, vertex_slice.hi_atom)
+ source_vertices = added_poisson_vertices + synapse_vertices
+ sdram_partition = SourceSegmentedSDRAMMachinePartition(
+ SYNAPSE_SDRAM_PARTITION_ID, sdram_label, source_vertices)
+ machine_graph.add_outgoing_edge_partition(sdram_partition)
+ neuron_vertex.set_sdram_partition(sdram_partition)
+
+ # Add SDRAM edges for synapse vertices
+ for source_vertex in source_vertices:
+ edge_label = "SDRAM {}-->{}".format(
+ source_vertex.label, neuron_vertex.label)
+ machine_graph.add_edge(
+ SDRAMMachineEdge(
+ source_vertex, neuron_vertex, edge_label),
+ SYNAPSE_SDRAM_PARTITION_ID)
+ source_vertex.set_sdram_partition(sdram_partition)
+
+ # Add SDRAM edge requirements to the neuron SDRAM, as the resource
+ # tracker will otherwise try to add another core for it
+ extra_sdram = MultiRegionSDRAM()
+ extra_sdram.merge(neuron_resources.sdram)
+ extra_sdram.add_cost(
+ len(extra_sdram.regions) + 1,
+ sdram_partition.total_sdram_requirements())
+ neuron_resources_plus = ResourceContainer(
+ sdram=extra_sdram, dtcm=neuron_resources.dtcm,
+ cpu_cycles=neuron_resources.cpu_cycles,
+ iptags=neuron_resources.iptags,
+ reverse_iptags=neuron_resources.reverse_iptags)
+ all_resources.append((neuron_resources_plus, constraints))
+
+ # Allocate all the resources to ensure they all fit
+ resource_tracker.allocate_constrained_group_resources(
+ all_resources)
+
+ return True
+
+ def __add_poisson_multicast(
+ self, poisson_vertex, synapse_vertices, machine_graph, app_edge):
+ """ Add an edge from a one-to-one Poisson source to one of the
+ synapse vertices using multicast
+
+ :param MachineVertex poisson_vertex:
+ The Poisson machine vertex to use as a source
+ :param list(MachineVertex) synapse_vertices:
+ The list of synapse vertices that can be used as targets
+ :param MachineGraph machine_graph: The machine graph to add the edge to
+ :param ProjectionEdge app_edge: The application edge of the connection
+ """
+ post_vertex = synapse_vertices[self.__next_synapse_index]
+ self.__next_synapse_index = (
+ (self.__next_synapse_index + 1) % self.__n_synapse_vertices)
+ edge = MachineEdge(poisson_vertex, post_vertex, app_edge=app_edge,
+ label=f"Machine edge for {app_edge.label}")
+ machine_graph.add_edge(edge, SPIKE_PARTITION_ID)
+
+ def __add_neuron_core(
+ self, vertex_slice, label, index, rb_shifts, weight_scales,
+ machine_graph, constraints):
+ """ Add a neuron core for for a slice of neurons
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to put on the core
+ :param str label: The name to give the core
+ :param int index: The index of the slice in the ordered list of slices
+ :param list(int) rb_shifts:
+ The computed ring-buffer shift values to use to get the weights
+ back to S1615 values
+ :param list(int) weight_scales:
+ The scale to apply to weights to encode them in the 16-bit synapses
+ :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
+ The graph to add the core to
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints to add
+ :return: The neuron vertex created and the resources used
+ :rtype: tuple(PopulationNeuronsMachineVertex, \
+ ~pacman.model.resources.ResourceContainer)
+ """
+ app_vertex = self._governed_app_vertex
+ neuron_resources = self.__get_neuron_resources(vertex_slice)
+ neuron_label = "{}_Neurons:{}-{}".format(
+ label, vertex_slice.lo_atom, vertex_slice.hi_atom)
+ neuron_vertex = PopulationNeuronsMachineVertex(
+ neuron_resources, neuron_label, constraints, app_vertex,
+ vertex_slice, index, rb_shifts, weight_scales)
+ machine_graph.add_vertex(neuron_vertex)
+ self.__neuron_vertices.append(neuron_vertex)
+
+ return neuron_vertex, neuron_resources
+
+ def __add_lead_synapse_core(
+ self, vertex_slice, independent_synapse_sdram,
+ proj_dependent_sdram, label, rb_shifts, weight_scales,
+ all_resources, machine_graph, synapse_vertices, neuron_vertex,
+ constraints):
+ """ Add the first synapse core for a neuron core. This core will
+ generate all the synaptic data required.
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons on the neuron core
+ :param int independent_synapse_sdram:
+ The SDRAM that will be used by every lead synapse core
+ :param int proj_dependent_sdram:
+ The SDRAM that will be used by the synapse core to handle a given
+ set of projections
+ :param str label: The name to give the core
+ :param list(int) rb_shifts:
+ The computed ring-buffer shift values to use to get the weights
+ back to S1615 values
+ :param list(int) weight_scales:
+ The scale to apply to weights to encode them in the 16-bit synapses
+ :param list(~pacman.model.resources.ResourceContainer) all_resources:
+ A list to add the resources of the vertex to
+ :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
+ The graph to add the core to
+ :param list(~pacman.model.graphs.machine.MachineVertex) \
+ synapse_vertices:
+ A list to add the core to
+ :param PopulationNeuronsMachineVertex neuron_vertex:
+ The neuron vertex the synapses will feed into
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints to add
+ :return: References to the synapse regions that can be used by a shared
+ synapse core, and the basic label for the synapse cores
+ :rtype: tuple(SynapseRegions, str)
+ """
+ # Get common synapse resources
+ app_vertex = self._governed_app_vertex
+ structural_sz = app_vertex.get_structural_dynamics_size(
+ vertex_slice, app_vertex.incoming_projections)
+ dynamics_sz = self._governed_app_vertex.get_synapse_dynamics_size(
+ vertex_slice)
+ all_syn_block_sz = app_vertex.get_synapses_size(
+ vertex_slice, app_vertex.incoming_projections)
+ # Need a minimum size to make it possible to reference
+ structural_sz = max(structural_sz, BYTES_PER_WORD)
+ dynamics_sz = max(dynamics_sz, BYTES_PER_WORD)
+ all_syn_block_sz = max(all_syn_block_sz, BYTES_PER_WORD)
+ shared_sdram = self.__shared_synapse_sdram(
+ independent_synapse_sdram, proj_dependent_sdram,
+ all_syn_block_sz, structural_sz, dynamics_sz)
+ synapse_references = self.__synapse_references
+ syn_label = "{}_Synapses:{}-{}".format(
+ label, vertex_slice.lo_atom, vertex_slice.hi_atom)
+
+ # Do the lead synapse core
+ lead_synapse_resources = self.__get_synapse_resources(
+ vertex_slice, shared_sdram)
+ lead_synapse_vertex = PopulationSynapsesMachineVertexLead(
+ lead_synapse_resources, "{}(0)".format(syn_label), constraints,
+ app_vertex, vertex_slice, rb_shifts, weight_scales,
+ all_syn_block_sz, structural_sz, synapse_references)
+ all_resources.append((lead_synapse_resources, constraints))
+ machine_graph.add_vertex(lead_synapse_vertex)
+ self.__synapse_vertices.append(lead_synapse_vertex)
+ synapse_vertices.append(lead_synapse_vertex)
+
+ self.__add_plastic_feedback(
+ machine_graph, neuron_vertex, lead_synapse_vertex)
+
+ return synapse_references, syn_label
+
+ def __add_shared_synapse_core(
+ self, syn_label, s_index, vertex_slice, synapse_references,
+ all_resources, machine_graph, synapse_vertices,
+ neuron_vertex, constraints):
+ """ Add a second or subsequent synapse core. This will reference the
+ synaptic data generated by the lead synapse core.
+
+ :param str syn_label: The basic synapse core label to be extended
+ :param int s_index: The index of the synapse core (0 is the lead core)
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons on the neuron core
+ :param SynapseRegions synapse_references:
+ References to the synapse regions
+ :param list(~pacman.model.resources.ResourceContainer) all_resources:
+ A list to add the resources of the vertex to
+ :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
+ The graph to add the core to
+ :param list(~pacman.model.graphs.machine.MachineVertex) \
+ synapse_vertices:
+ A list to add the core to
+ :param PopulationNeuronsMachineVertex neuron_vertex:
+ The neuron vertex the synapses will feed into
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints to add
+ """
+ app_vertex = self._governed_app_vertex
+ synapse_label = "{}({})".format(syn_label, s_index)
+ synapse_resources = self.__get_synapse_resources(vertex_slice)
+ synapse_vertex = PopulationSynapsesMachineVertexShared(
+ synapse_resources, synapse_label, constraints, app_vertex,
+ vertex_slice, synapse_references)
+ all_resources.append((synapse_resources, constraints))
+ machine_graph.add_vertex(synapse_vertex)
+ self.__synapse_vertices.append(synapse_vertex)
+ synapse_vertices.append(synapse_vertex)
+
+ self.__add_plastic_feedback(
+ machine_graph, neuron_vertex, synapse_vertex)
+
+ def __add_plastic_feedback(
+ self, machine_graph, neuron_vertex, synapse_vertex):
+ """ Add an edge if needed from the neuron core back to the synapse core
+ to allow the synapse core to process plastic synapses
+
+ :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
+ The graph to add the core to
+ :param PopulationNeuronsMachineVertex neuron_vertex:
+ The neuron vertex to start the edge at
+ :param PopulationSynapsesMachineVertexCommon synapse_vertex:
+ A synapse vertex to feed the spikes back to
+ """
+
+ # If synapse dynamics is not simply static, link the neuron vertex
+ # back to the synapse vertex
+ app_vertex = self._governed_app_vertex
+ if (app_vertex.synapse_dynamics is not None and
+ not isinstance(app_vertex.synapse_dynamics,
+ SynapseDynamicsStatic) and
+ app_vertex.self_projection is None):
+ neuron_to_synapse_edge = MachineEdge(neuron_vertex, synapse_vertex)
+ machine_graph.add_edge(neuron_to_synapse_edge, SPIKE_PARTITION_ID)
+ synapse_vertex.set_neuron_to_synapse_edge(neuron_to_synapse_edge)
+
+ def __handle_poisson_sources(self, label, machine_graph):
+ """ Go through the incoming projections and find Poisson sources with
+ splitters that work with us, and one-to-one connections that will
+ then work with SDRAM
+
+ :param str label: Base label to give to the Poisson cores
+ :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
+ The graph to add any Poisson cores to
+ """
+ self.__poisson_edges = set()
+ incoming_direct_poisson = defaultdict(list)
+ for proj in self._governed_app_vertex.incoming_projections:
+ pre_vertex = proj._projection_edge.pre_vertex
+ connector = proj._synapse_information.connector
+ if self.__is_direct_poisson_source(pre_vertex, connector):
+ # Create the direct Poisson vertices here; the splitter
+ # for the Poisson will create any others as needed
+ for vertex_slice in self.__get_fixed_slices():
+ resources = pre_vertex.get_resources_used_by_atoms(
+ vertex_slice)
+ poisson_label = "{}_Poisson:{}-{}".format(
+ label, vertex_slice.lo_atom, vertex_slice.hi_atom)
+ poisson_m_vertex = pre_vertex.create_machine_vertex(
+ vertex_slice, resources, label=poisson_label)
+ machine_graph.add_vertex(poisson_m_vertex)
+ incoming_direct_poisson[vertex_slice].append(
+ (poisson_m_vertex, proj._projection_edge))
+
+ # Keep track of edges that have been used for this
+ self.__poisson_edges.add(proj._projection_edge)
+ return incoming_direct_poisson
+
+ def __is_direct_poisson_source(self, pre_vertex, connector):
+ """ Determine if a given Poisson source can be created by this splitter
+
+ :param ~pacman.model.graphs.application.ApplicationVertex pre_vertex:
+ The vertex sending into the Projection
+ :param ~spynnaker.pyNN.models.neural_projections.connectors\
+ .AbstractConnector:
+ The connector in use in the Projection
+ :rtype: bool
+ """
+ return (isinstance(pre_vertex, SpikeSourcePoissonVertex) and
+ isinstance(pre_vertex.splitter, SplitterPoissonDelegate) and
+ len(pre_vertex.outgoing_projections) == 1 and
+ isinstance(connector, OneToOneConnector))
+
+ def __get_fixed_slices(self):
+ """ Get a list of fixed slices from the Application vertex
+
+ :rtype: list(~pacman.model.graphs.common.Slice)
+ """
+ if self.__slices is not None:
+ return self.__slices
+ atoms_per_core = self._governed_app_vertex.get_max_atoms_per_core()
+ n_atoms = self._governed_app_vertex.n_atoms
+ self.__slices = [Slice(low, min(low + atoms_per_core - 1, n_atoms - 1))
+ for low in range(0, n_atoms, atoms_per_core)]
+ return self.__slices
+
+ @overrides(AbstractSplitterCommon.get_in_coming_slices)
+ def get_in_coming_slices(self):
+ return self.__get_fixed_slices(), True
+
+ @overrides(AbstractSplitterCommon.get_out_going_slices)
+ def get_out_going_slices(self):
+ return self.__get_fixed_slices(), True
+
+ @overrides(AbstractSplitterCommon.get_out_going_vertices)
+ def get_out_going_vertices(self, edge, outgoing_edge_partition):
+ return {v: [MachineEdge] for v in self.__neuron_vertices}
+
+ @overrides(AbstractSplitterCommon.get_in_coming_vertices)
+ def get_in_coming_vertices(
+ self, edge, outgoing_edge_partition, src_machine_vertex):
+ # If the edge is delayed, get the real edge
+ if isinstance(edge, DelayedApplicationEdge):
+ edge = edge.undelayed_edge
+
+ # Filter out edges from Poisson sources being done using SDRAM
+ if edge in self.__poisson_edges:
+ return {}
+
+ # Pick the same synapse vertex index for each neuron vertex
+ index = self.__next_synapse_index
+ self.__next_synapse_index = (
+ (self.__next_synapse_index + 1) % self.__n_synapse_vertices)
+ return {self.__synapse_verts_by_neuron[neuron][index]: [MachineEdge]
+ for neuron in self.__neuron_vertices}
+
+ @overrides(AbstractSplitterCommon.machine_vertices_for_recording)
+ def machine_vertices_for_recording(self, variable_to_record):
+ if self._governed_app_vertex.neuron_recorder.is_recordable(
+ variable_to_record):
+ return self.__neuron_vertices
+ return self.__synapse_vertices
+
+ @overrides(AbstractSplitterCommon.reset_called)
+ def reset_called(self):
+ self.__neuron_vertices = None
+ self.__synapse_vertices = None
+ self.__synapse_verts_by_neuron = None
+ self.__max_delay = self.__user_max_delay
+ self.__allow_delay_extension = self.__user_allow_delay_extension
+
+ @property
+ def __synapse_references(self):
+ """ Get reference identifiers for the shared synapse regions
+
+ :rtype: SynapseRegions
+ """
+ references = [
+ ReferenceContext.next()
+ for _ in range(
+ len(PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS))]
+ return SynapseRegions(*references)
+
+ def __get_neuron_resources(self, vertex_slice):
+ """ Gets the resources of the neurons of a slice of atoms from a given
+ app vertex.
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice: the slice
+ :rtype: ~pacman.model.resources.ResourceContainer
+ """
+ n_record = len(self._governed_app_vertex.neuron_recordables)
+ variable_sdram = self._governed_app_vertex.get_neuron_variable_sdram(
+ vertex_slice)
+ sdram = MultiRegionSDRAM()
+ sdram.merge(self._governed_app_vertex.get_common_constant_sdram(
+ n_record, NeuronProvenance.N_ITEMS + NeuronMainProvenance.N_ITEMS,
+ PopulationNeuronsMachineVertex.COMMON_REGIONS))
+ sdram.merge(self._governed_app_vertex.get_neuron_constant_sdram(
+ vertex_slice, PopulationNeuronsMachineVertex.NEURON_REGIONS))
+ sdram.add_cost(
+ PopulationNeuronsMachineVertex.REGIONS.SDRAM_EDGE_PARAMS.value,
+ NEURONS_SDRAM_PARAMS_SIZE)
+ sdram.nest(
+ len(PopulationNeuronsMachineVertex.REGIONS) + 1, variable_sdram)
+ dtcm = self._governed_app_vertex.get_common_dtcm()
+ dtcm += self._governed_app_vertex.get_neuron_dtcm(vertex_slice)
+ cpu_cycles = self._governed_app_vertex.get_common_cpu()
+ cpu_cycles += self._governed_app_vertex.get_neuron_cpu(vertex_slice)
+
+ # set resources required from this object
+ container = ResourceContainer(
+ sdram=sdram, dtcm=DTCMResource(dtcm),
+ cpu_cycles=CPUCyclesPerTickResource(cpu_cycles))
+
+ # return the total resources.
+ return container
+
+ def __shared_synapse_sdram(
+ self, independent_synapse_sdram, proj_dependent_sdram,
+ all_syn_block_sz, structural_sz, dynamics_sz):
+ """ Get the SDRAM shared between synapse cores
+
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
+ """
+ sdram = MultiRegionSDRAM()
+ sdram.merge(independent_synapse_sdram)
+ sdram.merge(proj_dependent_sdram)
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .synaptic_matrix, all_syn_block_sz)
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.direct_matrix,
+ max(self._governed_app_vertex.all_single_syn_size, BYTES_PER_WORD))
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .structural_dynamics, structural_sz)
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .synapse_dynamics, dynamics_sz)
+ return sdram
+
+ def __get_synapse_resources(self, vertex_slice, shared_sdram=None):
+ """ Get the resources of the synapses of a slice of atoms from a
+ given app vertex.
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice: the slice
+ :param ~pacman.model.resources.MultiRegionSDRAM shared_sdram:
+ The SDRAM shared between cores, if this is to be included
+ :rtype: ~pacman.model.resources.ResourceContainer
+ """
+ n_record = len(self._governed_app_vertex.synapse_recordables)
+ variable_sdram = self._governed_app_vertex.get_synapse_variable_sdram(
+ vertex_slice)
+ sdram = MultiRegionSDRAM()
+ sdram.merge(self._governed_app_vertex.get_common_constant_sdram(
+ n_record,
+ SynapseProvenance.N_ITEMS + SpikeProcessingFastProvenance.N_ITEMS,
+ PopulationSynapsesMachineVertexLead.COMMON_REGIONS))
+
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.REGIONS
+ .SDRAM_EDGE_PARAMS.value, SYNAPSES_SDRAM_PARAMS_SIZE)
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.REGIONS.KEY_REGION.value,
+ KEY_CONFIG_SIZE)
+ sdram.nest(
+ len(PopulationSynapsesMachineVertexLead.REGIONS) + 1,
+ variable_sdram)
+ if shared_sdram is not None:
+ sdram.merge(shared_sdram)
+ dtcm = self._governed_app_vertex.get_common_dtcm()
+ dtcm += self._governed_app_vertex.get_synapse_dtcm(vertex_slice)
+ cpu_cycles = self._governed_app_vertex.get_common_cpu()
+ cpu_cycles += self._governed_app_vertex.get_synapse_cpu(vertex_slice)
+
+ # set resources required from this object
+ container = ResourceContainer(
+ sdram=sdram, dtcm=DTCMResource(dtcm),
+ cpu_cycles=CPUCyclesPerTickResource(cpu_cycles))
+
+ # return the total resources.
+ return container
+
+ def __independent_synapse_sdram(self):
+ """ Get the SDRAM used by all synapse cores independent of projections
+
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
+ """
+ app_vertex = self._governed_app_vertex
+ sdram = MultiRegionSDRAM()
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.synapse_params,
+ max(app_vertex.get_synapse_params_size(), BYTES_PER_WORD))
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .bitfield_builder,
+ max(exact_sdram_for_bit_field_builder_region(), BYTES_PER_WORD))
+ return sdram
+
+ def __proj_dependent_synapse_sdram(self, incoming_projections):
+ """ Get the SDRAM used by synapse cores dependent on the projections
+
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to consider in the calculations
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
+ """
+ app_vertex = self._governed_app_vertex
+ sdram = MultiRegionSDRAM()
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.pop_table,
+ max(MasterPopTableAsBinarySearch.get_master_population_table_size(
+ incoming_projections), BYTES_PER_WORD))
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .connection_builder,
+ max(app_vertex.get_synapse_expander_size(incoming_projections),
+ BYTES_PER_WORD))
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .bitfield_filter,
+ max(get_estimated_sdram_for_bit_field_region(incoming_projections),
+ BYTES_PER_WORD))
+ sdram.add_cost(
+ PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS
+ .bitfield_key_map,
+ max(get_estimated_sdram_for_key_region(incoming_projections),
+ BYTES_PER_WORD))
+ return sdram
+
+ @property
+ def __get_max_delay(self):
+ if self.__max_delay is not None:
+ return self.__max_delay
+
+ # Find the maximum delay from incoming synapses
+ app_vertex = self._governed_app_vertex
+ max_delay_ms = 0
+ for proj in app_vertex.incoming_projections:
+ s_info = proj._synapse_information
+ proj_max_delay = s_info.synapse_dynamics.get_delay_maximum(
+ s_info.connector, s_info)
+ max_delay_ms = max(max_delay_ms, proj_max_delay)
+ max_delay_steps = math.ceil(max_delay_ms / machine_time_step_ms())
+ max_delay_bits = get_n_bits(max_delay_steps)
+
+ # Find the maximum possible delay
+ n_atom_bits = get_n_bits(min(
+ app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms))
+ n_synapse_bits = get_n_bits(
+ app_vertex.neuron_impl.get_n_synapse_types())
+ n_delay_bits = MAX_RING_BUFFER_BITS - (n_atom_bits + n_synapse_bits)
+
+ # Pick the smallest between the two, so that not too many bits are used
+ final_n_delay_bits = min(n_delay_bits, max_delay_bits)
+ self.__max_delay = 2 ** final_n_delay_bits
+ if self.__allow_delay_extension is None:
+ self.__allow_delay_extension = max_delay_bits > final_n_delay_bits
+ return self.__max_delay
+
+ @overrides(AbstractSpynnakerSplitterDelay.max_support_delay)
+ def max_support_delay(self):
+ return self.__get_max_delay
+
+ @overrides(AbstractSpynnakerSplitterDelay.accepts_edges_from_delay_vertex)
+ def accepts_edges_from_delay_vertex(self):
+ if self.__allow_delay_extension is None:
+ self.__get_max_delay
+ return self.__allow_delay_extension
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_slice.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_slice.py
index b8ac7de15e..14105df800 100644
--- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_slice.py
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_slice.py
@@ -12,27 +12,30 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import os
-
from spinn_utilities.overrides import overrides
from pacman.exceptions import PacmanConfigurationException
-from pacman.executor.injection_decorator import inject_items
from pacman.model.constraints.partitioner_constraints import (
MaxVertexAtomsConstraint, FixedVertexAtomsConstraint,
AbstractPartitionerConstraint)
from pacman.model.graphs.machine import MachineEdge
from pacman.model.resources import (
- ResourceContainer, ConstantSDRAM, DTCMResource, CPUCyclesPerTickResource)
+ ResourceContainer, DTCMResource, CPUCyclesPerTickResource,
+ MultiRegionSDRAM)
from pacman.model.partitioner_splitters.abstract_splitters import (
AbstractSplitterSlice)
from pacman.utilities import utility_calls
-from spinn_front_end_common.interface.profiling import profile_utils
-from spinn_front_end_common.utilities.constants import (
- SYSTEM_BYTES_REQUIREMENT)
-from .abstract_spynnaker_splitter_delay import AbstractSpynnakerSplitterDelay
from spynnaker.pyNN.models.neuron import (
AbstractPopulationVertex, PopulationMachineVertex)
-from spynnaker.pyNN.utilities import bit_field_utilities
+from spynnaker.pyNN.models.neuron.population_machine_vertex import (
+ NeuronProvenance, SynapseProvenance, MainProvenance,
+ SpikeProcessingProvenance)
+from spynnaker.pyNN.models.neuron.master_pop_table import (
+ MasterPopTableAsBinarySearch)
+from .abstract_spynnaker_splitter_delay import AbstractSpynnakerSplitterDelay
+from spynnaker.pyNN.utilities.bit_field_utilities import (
+ get_estimated_sdram_for_bit_field_region,
+ get_estimated_sdram_for_key_region,
+ exact_sdram_for_bit_field_builder_region)
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
AbstractSynapseDynamicsStructural)
@@ -42,14 +45,27 @@ class SplitterAbstractPopulationVertexSlice(
""" handles the splitting of the AbstractPopulationVertex via slice logic.
"""
- __slots__ = []
-
- _NEURON_BASE_N_CPU_CYCLES_PER_NEURON = 22
- _NEURON_BASE_N_CPU_CYCLES = 10
- _C_MAIN_BASE_N_CPU_CYCLES = 0
+ __slots__ = [
+ # The pre-calculated ring buffer shifts
+ "__ring_buffer_shifts",
+ # The pre-calculated weight scales
+ "__weight_scales",
+ # The size of all the synapses on a core
+ "__all_syn_block_sz",
+ # The size of the structural plasticity data
+ "__structural_sz",
+ # The size of the synaptic expander data
+ "__synapse_expander_sz",
+ # The size of all the bitfield data
+ "__bitfield_sz",
+ # The next index to use for a synapse core
+ "__next_index"
+ ]
+ """ The name of the splitter """
SPLITTER_NAME = "SplitterAbstractPopulationVertexSlice"
+ """ The message to use when the Population is invalid """
INVALID_POP_ERROR_MESSAGE = (
"The vertex {} cannot be supported by the "
"SplitterAbstractPopulationVertexSlice as"
@@ -59,6 +75,13 @@ class SplitterAbstractPopulationVertexSlice(
def __init__(self):
super().__init__(self.SPLITTER_NAME)
+ self.__ring_buffer_shifts = None
+ self.__weight_scales = None
+ self.__all_syn_block_sz = dict()
+ self.__structural_sz = dict()
+ self.__synapse_expander_sz = None
+ self.__bitfield_sz = None
+ self.__next_index = 0
@overrides(AbstractSplitterSlice.set_governed_app_vertex)
def set_governed_app_vertex(self, app_vertex):
@@ -67,6 +90,14 @@ def set_governed_app_vertex(self, app_vertex):
raise PacmanConfigurationException(
self.INVALID_POP_ERROR_MESSAGE.format(app_vertex))
+ @overrides(AbstractSplitterSlice.create_machine_vertices)
+ def create_machine_vertices(self, resource_tracker, machine_graph):
+ app_vertex = self._governed_app_vertex
+ app_vertex.synapse_recorder.add_region_offset(
+ len(app_vertex.neuron_recorder.get_recordable_variables()))
+ return super(SplitterAbstractPopulationVertexSlice, self)\
+ .create_machine_vertices(resource_tracker, machine_graph)
+
@overrides(AbstractSplitterSlice.get_out_going_vertices)
def get_out_going_vertices(self, edge, outgoing_edge_partition):
return self._get_map([MachineEdge])
@@ -79,129 +110,199 @@ def get_in_coming_vertices(
@overrides(AbstractSplitterSlice.create_machine_vertex)
def create_machine_vertex(
self, vertex_slice, resources, label, remaining_constraints):
+
+ if self.__ring_buffer_shifts is None:
+ app_vertex = self._governed_app_vertex
+ self.__ring_buffer_shifts = app_vertex.get_ring_buffer_shifts(
+ app_vertex.incoming_projections)
+ self.__weight_scales = app_vertex.get_weight_scales(
+ self.__ring_buffer_shifts)
+
+ index = self.__next_index
+ self.__next_index += 1
return PopulationMachineVertex(
- resources,
- self._governed_app_vertex.neuron_recorder.recorded_ids_by_slice(
- vertex_slice),
- label, remaining_constraints, self._governed_app_vertex,
- vertex_slice,
- self._governed_app_vertex.synapse_manager.drop_late_spikes,
- self.__get_binary_file_name())
-
- @inject_items({"graph": "MemoryApplicationGraph"})
- @overrides(
- AbstractSplitterSlice.get_resources_used_by_atoms,
- additional_arguments=["graph"])
- def get_resources_used_by_atoms(self, vertex_slice, graph):
- """ Gets the resources of a slice of atoms from a given app vertex.
+ resources, label, remaining_constraints, self._governed_app_vertex,
+ vertex_slice, index, self.__ring_buffer_shifts,
+ self.__weight_scales, self.__all_syn_block_size(vertex_slice),
+ self.__structural_size(vertex_slice))
+
+ @overrides(AbstractSplitterSlice.get_resources_used_by_atoms)
+ def get_resources_used_by_atoms(self, vertex_slice):
+ """ Gets the resources of a slice of atoms
:param ~pacman.model.graphs.common.Slice vertex_slice: the slice
- :param ~pacman.model.graphs.machine.MachineGraph graph: app graph
:rtype: ~pacman.model.resources.ResourceContainer
"""
# pylint: disable=arguments-differ
- variable_sdram = self.get_variable_sdram(vertex_slice)
- constant_sdram = self.constant_sdram(vertex_slice, graph)
+ variable_sdram = self.__get_variable_sdram(vertex_slice)
+ constant_sdram = self.__get_constant_sdram(vertex_slice)
+ sdram = MultiRegionSDRAM()
+ sdram.nest(len(PopulationMachineVertex.REGIONS) + 1, variable_sdram)
+ sdram.merge(constant_sdram)
# set resources required from this object
container = ResourceContainer(
- sdram=variable_sdram + constant_sdram,
- dtcm=self.dtcm_cost(vertex_slice),
- cpu_cycles=self.cpu_cost(vertex_slice))
+ sdram=sdram, dtcm=self.__get_dtcm_cost(vertex_slice),
+ cpu_cycles=self.__get_cpu_cost(vertex_slice))
# return the total resources.
return container
- def get_variable_sdram(self, vertex_slice):
- """ returns the variable sdram from the recorder.
+ def __get_variable_sdram(self, vertex_slice):
+ """ returns the variable sdram from the recorders
:param ~pacman.model.graphs.common.Slice vertex_slice:
the atom slice for recording sdram
:return: the variable sdram used by the neuron recorder
:rtype: VariableSDRAM
"""
- s_dynamics = self._governed_app_vertex.synapse_manager.synapse_dynamics
+ s_dynamics = self._governed_app_vertex.synapse_dynamics
if isinstance(s_dynamics, AbstractSynapseDynamicsStructural):
max_rewires_per_ts = s_dynamics.get_max_rewires_per_ts()
- self._governed_app_vertex.neuron_recorder.set_max_rewires_per_ts(
+ self._governed_app_vertex.synapse_recorder.set_max_rewires_per_ts(
max_rewires_per_ts)
- return self._governed_app_vertex.neuron_recorder.\
- get_variable_sdram_usage(vertex_slice)
+ return (
+ self._governed_app_vertex.get_neuron_variable_sdram(vertex_slice) +
+ self._governed_app_vertex.get_synapse_variable_sdram(vertex_slice))
- def constant_sdram(self, vertex_slice, graph):
+ def __get_constant_sdram(self, vertex_slice):
""" returns the constant sdram used by the vertex slice.
:param ~pacman.model.graphs.common.Slice vertex_slice:
the atoms to get constant sdram of
- :param ~pacman.model.graphs.application.ApplicationGraph graph:
- app graph
- :rtype: ConstantSDRAM
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
+ """
+ n_record = (
+ len(self._governed_app_vertex.neuron_recordables) +
+ len(self._governed_app_vertex.synapse_recordables))
+ n_provenance = (
+ NeuronProvenance.N_ITEMS + SynapseProvenance.N_ITEMS +
+ MainProvenance.N_ITEMS + SpikeProcessingProvenance.N_ITEMS)
+ sdram = MultiRegionSDRAM()
+ sdram.merge(self._governed_app_vertex.get_common_constant_sdram(
+ n_record, n_provenance, PopulationMachineVertex.COMMON_REGIONS))
+ sdram.merge(self._governed_app_vertex.get_neuron_constant_sdram(
+ vertex_slice, PopulationMachineVertex.NEURON_REGIONS))
+ sdram.merge(self.__get_synapse_constant_sdram(vertex_slice))
+ return sdram
+
+ def __get_synapse_constant_sdram(self, vertex_slice):
+
+ """ Get the amount of fixed SDRAM used by synapse parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
+ """
+ sdram = MultiRegionSDRAM()
+ app_vertex = self._governed_app_vertex
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.synapse_params,
+ app_vertex.get_synapse_params_size())
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.synapse_dynamics,
+ app_vertex.get_synapse_dynamics_size(vertex_slice))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.structural_dynamics,
+ self.__structural_size(vertex_slice))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.synaptic_matrix,
+ self.__all_syn_block_size(vertex_slice))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.direct_matrix,
+ app_vertex.all_single_syn_size)
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.pop_table,
+ MasterPopTableAsBinarySearch.get_master_population_table_size(
+ app_vertex.incoming_projections))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.connection_builder,
+ self.__synapse_expander_size())
+ sdram.merge(self.__bitfield_size())
+ return sdram
+
+ def __all_syn_block_size(self, vertex_slice):
+ """ Work out how much SDRAM is needed for all the synapses
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+ :rtype: int
+ """
+ if vertex_slice in self.__all_syn_block_sz:
+ return self.__all_syn_block_sz[vertex_slice]
+ all_syn_block_sz = self._governed_app_vertex.get_synapses_size(
+ vertex_slice, self._governed_app_vertex.incoming_projections)
+ self.__all_syn_block_sz[vertex_slice] = all_syn_block_sz
+ return all_syn_block_sz
+
+ def __structural_size(self, vertex_slice):
+ """ Work out how much SDRAM is needed by the structural plasticity data
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+ :rtype: int
+ """
+ if vertex_slice in self.__structural_sz:
+ return self.__structural_sz[vertex_slice]
+ structural_sz = self._governed_app_vertex.get_structural_dynamics_size(
+ vertex_slice, self._governed_app_vertex.incoming_projections)
+ self.__structural_sz[vertex_slice] = structural_sz
+ return structural_sz
+
+ def __synapse_expander_size(self):
+ """ Work out how much SDRAM is needed for the synapse expander
+
+ :rtype: int
+ """
+ if self.__synapse_expander_sz is None:
+ self.__synapse_expander_sz = \
+ self._governed_app_vertex.get_synapse_expander_size(
+ self._governed_app_vertex.incoming_projections)
+ return self.__synapse_expander_sz
+
+ def __bitfield_size(self):
+ """ Work out how much SDRAM is needed by the bit fields
+
+ :rtype: ~pacman.model.resources.MultiRegionSDRAM
"""
- sdram_requirement = (
- SYSTEM_BYTES_REQUIREMENT +
- self._governed_app_vertex.get_sdram_usage_for_neuron_params(
- vertex_slice) +
- self._governed_app_vertex.neuron_recorder.get_static_sdram_usage(
- vertex_slice) +
- PopulationMachineVertex.get_provenance_data_size(
- len(PopulationMachineVertex.EXTRA_PROVENANCE_DATA_ENTRIES)) +
- self._governed_app_vertex.synapse_manager.get_sdram_usage_in_bytes(
- vertex_slice, graph, self._governed_app_vertex) +
- profile_utils.get_profile_region_size(
- self._governed_app_vertex.n_profile_samples) +
- bit_field_utilities.get_estimated_sdram_for_bit_field_region(
- graph, self._governed_app_vertex) +
- bit_field_utilities.get_estimated_sdram_for_key_region(
- graph, self._governed_app_vertex) +
- bit_field_utilities.exact_sdram_for_bit_field_builder_region())
- return ConstantSDRAM(sdram_requirement)
-
- def dtcm_cost(self, vertex_slice):
+ if self.__bitfield_sz is None:
+ sdram = MultiRegionSDRAM()
+ projections = self._governed_app_vertex.incoming_projections
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_filter,
+ get_estimated_sdram_for_bit_field_region(projections))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_key_map,
+ get_estimated_sdram_for_key_region(projections))
+ sdram.add_cost(
+ PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_builder,
+ exact_sdram_for_bit_field_builder_region())
+ self.__bitfield_sz = sdram
+ return self.__bitfield_sz
+
+ def __get_dtcm_cost(self, vertex_slice):
""" get the dtcm cost for the slice of atoms
:param Slice vertex_slice: atom slice for dtcm calc.
:rtype: DTCMResource
"""
return DTCMResource(
- self._governed_app_vertex.neuron_impl.get_dtcm_usage_in_bytes(
- vertex_slice.n_atoms) +
- self._governed_app_vertex.neuron_recorder.get_dtcm_usage_in_bytes(
- vertex_slice) +
- self._governed_app_vertex.synapse_manager.
- get_dtcm_usage_in_bytes())
-
- def cpu_cost(self, vertex_slice):
+ self._governed_app_vertex.get_common_dtcm() +
+ self._governed_app_vertex.get_neuron_dtcm(vertex_slice) +
+ self._governed_app_vertex.get_synapse_dtcm(vertex_slice))
+
+ def __get_cpu_cost(self, vertex_slice):
""" get cpu cost for a slice of atoms
:param Slice vertex_slice: slice of atoms
:rtype: CPUCyclesPerTickResourcer
"""
return CPUCyclesPerTickResource(
- self._NEURON_BASE_N_CPU_CYCLES + self._C_MAIN_BASE_N_CPU_CYCLES +
- (self._NEURON_BASE_N_CPU_CYCLES_PER_NEURON *
- vertex_slice.n_atoms) +
- self._governed_app_vertex.neuron_recorder.get_n_cpu_cycles(
- vertex_slice.n_atoms) +
- self._governed_app_vertex.neuron_impl.get_n_cpu_cycles(
- vertex_slice.n_atoms) +
- self._governed_app_vertex.synapse_manager.get_n_cpu_cycles())
-
- def __get_binary_file_name(self):
- """ returns the binary name for the machine vertices.
-
- :rtype: str
- """
-
- # Split binary name into title and extension
- binary_title, binary_extension = os.path.splitext(
- self._governed_app_vertex.neuron_impl.binary_name)
-
- # Reunite title and extension and return
- return (
- binary_title +
- self._governed_app_vertex.synapse_manager.
- vertex_executable_suffix + binary_extension)
+ self._governed_app_vertex.get_common_cpu() +
+ self._governed_app_vertex.get_neuron_cpu(vertex_slice) +
+ self._governed_app_vertex.get_synapse_cpu(vertex_slice))
@overrides(AbstractSplitterSlice.check_supported_constraints)
def check_supported_constraints(self):
@@ -210,3 +311,12 @@ def check_supported_constraints(self):
supported_constraints=[
MaxVertexAtomsConstraint, FixedVertexAtomsConstraint],
abstract_constraint_type=AbstractPartitionerConstraint)
+
+ @overrides(AbstractSplitterSlice.reset_called)
+ def reset_called(self):
+ super(SplitterAbstractPopulationVertexSlice, self).reset_called()
+ self.__ring_buffer_shifts = None
+ self.__weight_scales = None
+ self.__all_syn_block_sz = dict()
+ self.__structural_sz = dict()
+ self.__next_index = 0
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py
index a6a95ad0b3..811a84d9c1 100644
--- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py
@@ -106,9 +106,9 @@ def create_machine_vertices(
self.NEED_EXACT_ERROR_MESSAGE)
# create vertices correctly
- for vertex_slice in pre_slices:
+ for index, vertex_slice in enumerate(pre_slices):
vertex = self.create_machine_vertex(
- vertex_slice, resource_tracker,
+ vertex_slice, index, resource_tracker,
self.DELAY_EXTENSION_SLICE_LABEL.format(
self._other_splitter.governed_app_vertex, vertex_slice),
get_remaining_constraints(self._governed_app_vertex),
@@ -138,7 +138,7 @@ def set_governed_app_vertex(self, app_vertex):
self.INVALID_POP_ERROR_MESSAGE.format(app_vertex))
def create_machine_vertex(
- self, vertex_slice, resource_tracker, label,
+ self, vertex_slice, index, resource_tracker, label,
remaining_constraints, graph):
""" creates a delay extension machine vertex and adds to the tracker.
@@ -158,7 +158,7 @@ def create_machine_vertex(
machine_vertex = DelayExtensionMachineVertex(
resources, label, remaining_constraints,
- self._governed_app_vertex, vertex_slice)
+ self._governed_app_vertex, vertex_slice, index)
self._machine_vertex_by_slice[vertex_slice] = machine_vertex
return machine_vertex
@@ -170,7 +170,7 @@ def get_resources_used_by_atoms(self, vertex_slice, graph):
:param graph: app graph
:rtype: ResourceContainer
"""
- constant_sdram = self.constant_sdram(graph)
+ constant_sdram = self.constant_sdram(graph, vertex_slice)
# set resources required from this object
container = ResourceContainer(
@@ -181,15 +181,17 @@ def get_resources_used_by_atoms(self, vertex_slice, graph):
# return the total resources.
return container
- def constant_sdram(self, graph):
+ def constant_sdram(self, graph, vertex_slice):
""" returns the sdram used by the delay extension
:param ApplicationGraph graph: app graph
+ :param Slice vertex_slice: The slice to get the size of
:rtype: ConstantSDRAM
"""
out_edges = graph.get_edges_starting_at_vertex(self)
return ConstantSDRAM(
SYSTEM_BYTES_REQUIREMENT +
+ self._governed_app_vertex.delay_params_size(vertex_slice) +
self._governed_app_vertex.tdma_sdram_size_in_bytes +
DelayExtensionMachineVertex.get_provenance_data_size(
DelayExtensionMachineVertex.N_EXTRA_PROVENANCE_DATA_ENTRIES) +
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_poisson_delegate.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_poisson_delegate.py
new file mode 100644
index 0000000000..61665c3878
--- /dev/null
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_poisson_delegate.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from spinn_utilities.overrides import overrides
+from pacman.model.partitioner_splitters.abstract_splitters import (
+ AbstractSplitterCommon)
+from pacman.exceptions import PacmanConfigurationException
+from spynnaker.pyNN.models.spike_source import SpikeSourcePoissonVertex
+from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
+from spynnaker.pyNN.models.neural_projections.connectors import (
+ OneToOneConnector)
+from .spynnaker_splitter_slice_legacy import SpynnakerSplitterSliceLegacy
+from .abstract_supports_one_to_one_sdram_input import (
+ AbstractSupportsOneToOneSDRAMInput)
+
+
+class SplitterPoissonDelegate(SpynnakerSplitterSliceLegacy):
+ """ A splitter for Poisson sources that will ignore sources that are
+ one-to-one connected to a single Population
+ """
+
+ # Message to display on error
+ INVALID_POP_ERROR_MESSAGE = (
+ "The vertex {} cannot be supported by the SplitterPoissonDelegate as"
+ " the only vertex supported by this splitter is a "
+ "SpikeSourcePoissonVertex. Please use the correct splitter for "
+ "your vertex and try again.")
+
+ @property
+ def send_over_sdram(self):
+ """ Determine if this vertex is to be sent using SDRAM
+
+ :rtype: bool
+ """
+ # If there is only one outgoing projection, and it is one-to-one
+ # connected to the target, and the target knows what to do, leave
+ # it to the target
+ if len(self._governed_app_vertex.outgoing_projections) == 1:
+ proj = self._governed_app_vertex.outgoing_projections[0]
+ post_vertex = proj._projection_edge.post_vertex
+ connector = proj._synapse_information.connector
+ if (isinstance(post_vertex, AbstractPopulationVertex) and
+ isinstance(post_vertex.splitter,
+ AbstractSupportsOneToOneSDRAMInput) and
+ isinstance(connector, OneToOneConnector)):
+ return True
+ return False
+
+ @overrides(SpynnakerSplitterSliceLegacy.set_governed_app_vertex)
+ def set_governed_app_vertex(self, app_vertex):
+ AbstractSplitterCommon.set_governed_app_vertex(self, app_vertex)
+ if not isinstance(app_vertex, SpikeSourcePoissonVertex):
+ raise PacmanConfigurationException(
+ self.INVALID_POP_ERROR_MESSAGE.format(app_vertex))
+
+ @overrides(SpynnakerSplitterSliceLegacy.create_machine_vertices)
+ def create_machine_vertices(self, resource_tracker, machine_graph):
+ # If sending over SDRAM, let the target handle this
+ if self.send_over_sdram:
+ return
+
+ # If we passed this part, use the super class
+ return super(SplitterPoissonDelegate, self).create_machine_vertices(
+ resource_tracker, machine_graph)
+
+ @overrides(AbstractSplitterCommon.get_in_coming_slices)
+ def get_in_coming_slices(self):
+ if self.send_over_sdram:
+ proj = self._governed_app_vertex.outgoing_projections[0]
+ post_vertex = proj._projection_edge.post_vertex
+ return post_vertex.splitter.get_in_coming_slices()
+ return super(SplitterPoissonDelegate, self).get_in_coming_slices()
+
+ @overrides(AbstractSplitterCommon.get_out_going_slices)
+ def get_out_going_slices(self):
+ if self.send_over_sdram:
+ proj = self._governed_app_vertex.outgoing_projections[0]
+ post_vertex = proj._projection_edge.post_vertex
+ return post_vertex.splitter.get_out_going_slices()
+ return super(SplitterPoissonDelegate, self).get_out_going_slices()
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_partitioner.py b/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_partitioner.py
index 388c7c091c..212b2c0ec0 100644
--- a/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_partitioner.py
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_partitioner.py
@@ -15,6 +15,7 @@
from spinn_utilities.overrides import overrides
from pacman.model.partitioner_interfaces import AbstractSlicesConnect
from pacman.operations.partition_algorithms import SplitterPartitioner
+from data_specification import ReferenceContext
class SpynnakerSplitterPartitioner(SplitterPartitioner):
@@ -37,9 +38,10 @@ def __call__(
:raise PacmanPartitionException: when it cant partition
"""
- # do partitioning in same way
- machine_graph, chips_used = super().__call__(
- app_graph, machine, plan_n_time_steps, pre_allocated_resources)
+ # do partitioning in same way, but in a context of references
+ with ReferenceContext():
+ machine_graph, chips_used = super().__call__(
+ app_graph, machine, plan_n_time_steps, pre_allocated_resources)
# return the accepted things
return machine_graph, chips_used
diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_selector.py b/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_selector.py
index 01c3abc8ce..c30b65edcd 100644
--- a/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_selector.py
+++ b/spynnaker/pyNN/extra_algorithms/splitter_components/spynnaker_splitter_selector.py
@@ -24,6 +24,7 @@
from .splitter_abstract_pop_vertex_slice import (
SplitterAbstractPopulationVertexSlice)
from .spynnaker_splitter_slice_legacy import SpynnakerSplitterSliceLegacy
+from .splitter_poisson_delegate import SplitterPoissonDelegate
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker.pyNN.models.spike_source.spike_source_array_vertex import (
SpikeSourceArrayVertex)
@@ -116,4 +117,5 @@ def spike_source_poisson_heuristic(app_vertex):
:param ~pacman.model.graphs.application.ApplicationGraph app_vertex:
app vertex
"""
- app_vertex.splitter = SpynnakerSplitterSliceLegacy()
+ # app_vertex.splitter = SpynnakerSplitterSliceLegacy()
+ app_vertex.splitter = SplitterPoissonDelegate()
diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_machine_bit_field_router_compressor.py b/spynnaker/pyNN/extra_algorithms/spynnaker_machine_bit_field_router_compressor.py
index c990dd6b3c..dda4ec78b6 100644
--- a/spynnaker/pyNN/extra_algorithms/spynnaker_machine_bit_field_router_compressor.py
+++ b/spynnaker/pyNN/extra_algorithms/spynnaker_machine_bit_field_router_compressor.py
@@ -25,6 +25,8 @@
machine_bit_field_router_compressor import (
MachineBitFieldPairRouterCompressor,
MachineBitFieldOrderedCoveringCompressor)
+from spinn_front_end_common.utilities.helpful_functions import (
+ write_address_to_user1)
from spinn_front_end_common.utilities.system_control_logic import (
run_system_application)
from spinn_front_end_common.utilities.utility_objs import ExecutableType
@@ -85,7 +87,8 @@ def __call__(
# adjust cores to exclude the ones which did not give sdram.
expander_chip_cores = self._locate_expander_rerun_targets(
- compressor_executable_targets, executable_finder, placements)
+ compressor_executable_targets, executable_finder, placements,
+ transceiver)
# just rerun the synaptic expander for safety purposes
self._rerun_synaptic_cores(
@@ -99,11 +102,14 @@ def _compressor_factory(self):
"Method to call the specific compressor to use"
def _locate_expander_rerun_targets(
- self, bitfield_targets, executable_finder, placements):
+ self, bitfield_targets, executable_finder, placements,
+ transceiver):
""" removes host based cores for synaptic matrix regeneration
:param ~.ExecutableTargets bitfield_targets: the cores that were used
:param ~.ExecutableFinder executable_finder: way to get binary path
+ :param ~.Placements placements: placements on machine
+ :param ~.Transceiver transceiver: spinnman instance
:return: new targets for synaptic expander
:rtype: ~.ExecutableTargets
"""
@@ -121,6 +127,10 @@ def _locate_expander_rerun_targets(
expander_executable_path,
placement.x, placement.y, placement.p,
executable_type=ExecutableType.SYSTEM)
+ # Write the region to USER1, as that is the best we can do
+ write_address_to_user1(
+ transceiver, placement.x, placement.y, placement.p,
+ placement.vertex.connection_generator_region)
return new_cores
@staticmethod
diff --git a/spynnaker/pyNN/extra_algorithms/synapse_expander.py b/spynnaker/pyNN/extra_algorithms/synapse_expander.py
index 2fe18e9c75..69ce97fb8b 100644
--- a/spynnaker/pyNN/extra_algorithms/synapse_expander.py
+++ b/spynnaker/pyNN/extra_algorithms/synapse_expander.py
@@ -24,6 +24,8 @@
AbstractSynapseExpandable, SYNAPSE_EXPANDER_APLX)
from spynnaker.pyNN.models.utility_models.delays import (
DelayExtensionMachineVertex, DELAY_EXPANDER_APLX)
+from spinn_front_end_common.utilities.helpful_functions import (
+ write_address_to_user1)
logger = FormatAdapter(logging.getLogger(__name__))
@@ -50,7 +52,7 @@ def synapse_expander(
# Find the places where the synapse expander and delay receivers should run
expander_cores, expanded_pop_vertices = _plan_expansion(
- placements, synapse_bin, delay_bin)
+ placements, synapse_bin, delay_bin, transceiver)
progress = ProgressBar(expander_cores.total_processors,
"Expanding Synapses")
@@ -64,8 +66,19 @@ def synapse_expander(
_fill_in_connection_data(expanded_pop_vertices, transceiver)
-def _plan_expansion(placements, synapse_expander_bin,
- delay_expander_bin):
+def _plan_expansion(
+ placements, synapse_expander_bin, delay_expander_bin, transceiver):
+ """ Plan the expansion of synapses and set up the regions using USER1
+
+ :param ~pacman.model.placements.Placements: The placements of the vertices
+ :param str synapse_expander_bin: The binary name of the synapse expander
+ :param str delay_expander_bin: The binary name of the delay expander
+ :param ~spinnman.transceiver.Transceiver transceiver:
+ How to talk to the machine
+ :return: The places to load the synapse expander and delay expander
+ executables, and the target machine vertices to read synapses back from
+ :rtype: (ExecutableTargets, list(MachineVertex, Placement))
+ """
expander_cores = ExecutableTargets()
expanded_pop_vertices = list()
@@ -81,6 +94,11 @@ def _plan_expansion(placements, synapse_expander_bin,
placement.x, placement.y, placement.p,
executable_type=ExecutableType.SYSTEM)
expanded_pop_vertices.append((vertex, placement))
+ # Write the region to USER1, as that is the best we can do
+ write_address_to_user1(
+ transceiver, placement.x, placement.y, placement.p,
+ vertex.connection_generator_region)
+
elif isinstance(vertex, DelayExtensionMachineVertex):
if vertex.gen_on_machine():
expander_cores.add_processor(
@@ -94,6 +112,11 @@ def _plan_expansion(placements, synapse_expander_bin,
def _fill_in_connection_data(expanded_pop_vertices, transceiver):
""" Once expander has run, fill in the connection data
+ :param list(MachineVertex, Placement) expanded_pop_vertices:
+ List of machine vertices to read data from
+ :param ~spinnman.transceiver.Transceiver transceiver:
+ How to talk to the machine
+
:rtype: None
"""
for vertex, placement in expanded_pop_vertices:
diff --git a/spynnaker/pyNN/models/abstract_models/__init__.py b/spynnaker/pyNN/models/abstract_models/__init__.py
index 9266ca67a4..290619ac98 100644
--- a/spynnaker/pyNN/models/abstract_models/__init__.py
+++ b/spynnaker/pyNN/models/abstract_models/__init__.py
@@ -25,10 +25,16 @@
from .abstract_synapse_expandable import (
AbstractSynapseExpandable, SYNAPSE_EXPANDER_APLX)
from .abstract_weight_updatable import AbstractWeightUpdatable
+from .sends_synaptic_inputs_over_sdram import SendsSynapticInputsOverSDRAM
+from .receives_synaptic_inputs_over_sdram import (
+ ReceivesSynapticInputsOverSDRAM)
+from .has_synapses import HasSynapses
__all__ = ["AbstractAcceptsIncomingSynapses", "AbstractContainsUnits",
"AbstractHasDelayStages",
"AbstractMaxSpikes", "AbstractPopulationInitializable",
"AbstractPopulationSettable", "AbstractReadParametersBeforeSet",
"AbstractSettable", "AbstractSynapseExpandable",
- "AbstractWeightUpdatable", "SYNAPSE_EXPANDER_APLX"]
+ "AbstractWeightUpdatable", "SYNAPSE_EXPANDER_APLX",
+ "SendsSynapticInputsOverSDRAM", "ReceivesSynapticInputsOverSDRAM",
+ "HasSynapses"]
diff --git a/spynnaker/pyNN/models/abstract_models/abstract_synapse_expandable.py b/spynnaker/pyNN/models/abstract_models/abstract_synapse_expandable.py
index 1e4703ff95..fb53778781 100644
--- a/spynnaker/pyNN/models/abstract_models/abstract_synapse_expandable.py
+++ b/spynnaker/pyNN/models/abstract_models/abstract_synapse_expandable.py
@@ -13,7 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-from spinn_utilities.abstract_base import AbstractBase, abstractmethod
+from spinn_utilities.abstract_base import (
+ AbstractBase, abstractmethod, abstractproperty)
from spinn_utilities.require_subclass import require_subclass
from pacman.model.graphs.machine.machine_vertex import MachineVertex
@@ -26,9 +27,6 @@ class AbstractSynapseExpandable(object, metaclass=AbstractBase):
:py:class:`~pacman.model.graphs.machine.MachineVertex`) \
that has may need to run the SYNAPSE_EXPANDER aplx
- Cores that do not use the synapse_manager should not implement this
- API even though their app vertex may hold a synapse_manager.
-
.. note::
This is *not* implemented by the
:py:class:`~.DelayExtensionMachineVertex`,
@@ -49,14 +47,19 @@ def gen_on_machine(self):
:rtype: bool
"""
+ @abstractproperty
+ def connection_generator_region(self):
+ """ The region containing the parameters of synaptic expansion
+
+ :rtype: int
+
+ :rtype: bool
+ """
+
@abstractmethod
def read_generated_connection_holders(self, transceiver, placement):
""" Fill in the connection holders
- .. note::
- The typical implementation for this method will be to ask the
- app_vertex's synapse_manager
-
:param ~spinnman.transceiver.Transceiver transceiver:
How the data is to be read
:param ~pacman.model.placements.Placement placement:
diff --git a/spynnaker/pyNN/models/abstract_models/has_synapses.py b/spynnaker/pyNN/models/abstract_models/has_synapses.py
new file mode 100644
index 0000000000..c555461443
--- /dev/null
+++ b/spynnaker/pyNN/models/abstract_models/has_synapses.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from six import add_metaclass
+from spinn_utilities.abstract_base import AbstractBase, abstractmethod
+
+
+@add_metaclass(AbstractBase)
+class HasSynapses(object):
+
+ @abstractmethod
+ def get_connections_from_machine(
+ self, transceiver, placement, app_edge, synapse_info):
+ """ Get the connections from the machine for this vertex.
+
+ :param ~spinnman.transceiver.Transceiver transceiver:
+ How to read the connection data
+ :param ~pacman.model.placement.Placement placement:
+ Where the connection data is on the machine
+ :param ProjectionApplicationEdge app_edge:
+ The edge for which the data is being read
+ :param SynapseInformation synapse_info:
+ The specific projection within the edge
+ """
+
+ @abstractmethod
+ def clear_connection_cache(self):
+ """ Flush the cache of connection information; needed for a second run
+ """
diff --git a/spynnaker/pyNN/models/abstract_models/receives_synaptic_inputs_over_sdram.py b/spynnaker/pyNN/models/abstract_models/receives_synaptic_inputs_over_sdram.py
new file mode 100644
index 0000000000..12b7bac46d
--- /dev/null
+++ b/spynnaker/pyNN/models/abstract_models/receives_synaptic_inputs_over_sdram.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from six import add_metaclass
+from spinn_utilities.abstract_base import AbstractBase, abstractproperty
+from pacman.model.graphs import AbstractSupportsSDRAMEdges
+from spinn_front_end_common.utilities.constants import BYTES_PER_SHORT
+
+
+@add_metaclass(AbstractBase)
+class ReceivesSynapticInputsOverSDRAM(AbstractSupportsSDRAMEdges):
+ """ An object that receives synaptic inputs over SDRAM.
+
+ The number of neurons to be sent per synapse type is rounded up to be
+ a power of 2. A sender must send N_BYTES_PER_INPUT of data for each
+ synapse type for each neuron, formatted as all the data for each neuron
+ for the first synapse type, followed by all the data for each neuron
+ for the second, and so on for each synapse type. Each input is an
+ accumulated weight value for the timestep, scaled with the given weight
+ scales.
+ """
+
+ # The size of each input in bytes
+ N_BYTES_PER_INPUT = BYTES_PER_SHORT
+
+ @abstractproperty
+ def n_target_neurons(self):
+ """ The number of neurons expecting to receive input
+
+ :rtype: int
+ """
+
+ @abstractproperty
+ def n_target_synapse_types(self):
+ """ The number of synapse types expecting to receive input
+
+ :rtype: int
+ """
+
+ @abstractproperty
+ def weight_scales(self):
+ """ A list of scale factors to be applied to weights that get passed
+ over SDRAM, one for each synapse type.
+
+ :rtype: list(int)
+ """
+
+ @abstractproperty
+ def n_bytes_for_transfer(self):
+ """ The number of bytes to be sent over the channel. This will be
+ calculated using the above numbers, but also rounded up to a number
+ of words, and with the number of neurons rounded up to a power of 2
+
+ :rtype: int
+ """
diff --git a/spynnaker/pyNN/models/abstract_models/sends_synaptic_inputs_over_sdram.py b/spynnaker/pyNN/models/abstract_models/sends_synaptic_inputs_over_sdram.py
new file mode 100644
index 0000000000..404453d10e
--- /dev/null
+++ b/spynnaker/pyNN/models/abstract_models/sends_synaptic_inputs_over_sdram.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2020-2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from pacman.model.graphs import AbstractSupportsSDRAMEdges
+
+
+class SendsSynapticInputsOverSDRAM(AbstractSupportsSDRAMEdges):
+ """ A marker interface for an object that sends synaptic inputs over SDRAM
+ """
diff --git a/spynnaker/pyNN/models/common/abstract_neuron_recordable.py b/spynnaker/pyNN/models/common/abstract_neuron_recordable.py
index f160482efb..f0efbb6617 100644
--- a/spynnaker/pyNN/models/common/abstract_neuron_recordable.py
+++ b/spynnaker/pyNN/models/common/abstract_neuron_recordable.py
@@ -93,16 +93,3 @@ def get_neuron_sampling_interval(self, variable):
:return: Sampling interval in microseconds
:rtype: float
"""
-
- @abstractmethod
- def get_expected_n_rows(
- self, n_machine_time_steps, sampling_rate, vertex, variable):
- """ Returns the number of expected rows for a given runtime
-
- :param int n_machine_time_steps: map of vertex to steps.
- :param int sampling_rate: the sampling rate for this vertex
- :param ~pacman.model.graphs.machine.MachineVertex vertex:
- the machine vertex
- :param str variable: the variable being recorded
- :return: int the number of rows expected.
- """
diff --git a/spynnaker/pyNN/models/common/neuron_recorder.py b/spynnaker/pyNN/models/common/neuron_recorder.py
index 9c15a197f4..9678e94d85 100644
--- a/spynnaker/pyNN/models/common/neuron_recorder.py
+++ b/spynnaker/pyNN/models/common/neuron_recorder.py
@@ -26,10 +26,6 @@
BYTES_PER_WORD, BITS_PER_WORD)
from spinn_front_end_common.utilities.globals_variables import (
machine_time_step_ms)
-from spinn_front_end_common.interface.buffer_management.recording_utilities \
- import (
- get_recording_header_array, get_recording_header_size,
- get_recording_data_constant_size)
logger = FormatAdapter(logging.getLogger(__name__))
@@ -117,6 +113,9 @@ class NeuronRecorder(object):
#: max_rewires
MAX_REWIRES = "max_rewires"
+ #: number of words per rewiring entry
+ REWIRING_N_WORDS = 2
+
#: rewiring: shift values to decode recorded value
_PRE_ID_SHIFT = 9
_POST_ID_SHIFT = 1
@@ -151,20 +150,26 @@ def __init__(
self.__events_per_ts = dict()
self.__events_per_ts[self.MAX_REWIRES] = 0 # record('all')
- self.__region_ids = dict()
- region_id = 0
- for region_id, variable in enumerate(itertools.chain(
- allowed_variables, bitfield_variables)):
+ # Get info on variables like these
+ for variable in itertools.chain(allowed_variables, bitfield_variables):
self.__sampling_rates[variable] = 0
self.__indexes[variable] = None
+
+ # Get region ids for all variables
+ self.__region_ids = dict()
+ for region_id, variable in enumerate(itertools.chain(
+ allowed_variables, bitfield_variables,
+ events_per_core_variables, per_timestep_variables)):
self.__region_ids[variable] = region_id
- event_region_id = region_id
- for event_region_id, variable in enumerate(
- events_per_core_variables, start=region_id + 1):
- self.__region_ids[variable] = event_region_id
- for ts_region_id, variable in enumerate(
- per_timestep_variables, start=event_region_id + 1):
- self.__region_ids[variable] = ts_region_id
+
+ def add_region_offset(self, offset):
+ """ Add an offset to the regions. Used when there are multiple\
+ recorders on a single core
+
+ :param int offset: The offset to add
+ """
+ self.__region_ids = dict((var, region + offset)
+ for var, region in self.__region_ids.items())
def _count_recording_per_slice(
self, variable, vertex_slice):
@@ -299,17 +304,6 @@ def _get_placement_matrix_data(
sampling_rate, label, placement_data)
return placement_data
- @staticmethod
- def expected_rows_for_a_run_time(n_machine_time_steps, sampling_rate):
- """ determines how many rows to see based off how long its ran for
-
- :param int n_machine_time_steps: map of vertex to time steps
- :param float sampling_rate: the sampling rate for a given variable
- :return: how many rows there should be.
- :rtype: int
- """
- return int(math.ceil(n_machine_time_steps / sampling_rate))
-
def __read_data(
self, label, buffer_manager, placements, application_vertex,
sampling_rate, data_type, variable, n_machine_time_steps):
@@ -326,8 +320,8 @@ def __read_data(
indexes = []
for i, vertex in enumerate(progress.over(vertices)):
- expected_rows = application_vertex.get_expected_n_rows(
- n_machine_time_steps, sampling_rate, vertex, variable)
+ expected_rows = int(
+ math.ceil(n_machine_time_steps / sampling_rate))
n_items_per_timestep = 1
if variable in self.__sampling_rates:
@@ -555,10 +549,6 @@ def _get_rewires(
if neurons_recording == 0:
continue
- # Read the rewiring data
- n_words = int(math.ceil(neurons_recording / BITS_PER_WORD))
- n_words_with_timestamp = n_words + 1
-
# for buffering output info is taken form the buffer manager
region = self.__region_ids[variable]
record_raw, data_missing = buffer_manager.get_data_by_placement(
@@ -569,7 +559,7 @@ def _get_rewires(
if len(record_raw) > 0:
raw_data = (
numpy.asarray(record_raw, dtype="uint8").view(
- dtype=" 0
- except KeyError as e:
- if variable in self.__per_timestep_recording:
- return True
- elif variable in self.__events_per_core_recording:
+ except KeyError:
+ if (variable in self.__events_per_core_recording or
+ variable in self.__per_timestep_recording):
return True
- elif variable not in self.__per_timestep_variables and\
- variable not in self.__events_per_core_variables:
- msg = ("Variable {} is not supported. Supported variables are"
- "{}".format(variable, self.get_recordable_variables()))
- raise ConfigurationException(msg) from e
+ return False
+
+ def is_recordable(self, variable):
+ """ Identify if the given variable can be recorded
+
+ :param str variable: The variable to check for
+ :rtype: bool
+ """
+ return (variable in self.__sampling_rates or
+ variable in self.__per_timestep_variables or
+ variable in self.__events_per_core_variables)
@property
def recording_variables(self):
@@ -948,9 +943,10 @@ def set_recording(self, variable, new_state, sampling_interval=None,
raise ConfigurationException("Variable {} is not supported".format(
variable))
- def _get_buffered_sdram(
- self, vertex_slice, n_machine_time_steps):
- """
+ def get_region_sizes(self, vertex_slice, n_machine_time_steps):
+ """ Get the sizes of the regions for the variables, whether they are
+ recorded or not, with those that are not having a size of 0
+
:param ~pacman.model.graphs.commmon.Slice vertex_slice:
:param int n_machine_time_steps:
:rtype: list(int)
@@ -964,20 +960,16 @@ def _get_buffered_sdram(
return values
def write_neuron_recording_region(
- self, spec, neuron_recording_region, vertex_slice,
- data_n_time_steps):
+ self, spec, neuron_recording_region, vertex_slice):
""" recording data specification
:param ~data_specification.DataSpecificationGenerator spec: dsg spec
:param int neuron_recording_region: the recording region
:param ~pacman.model.graphs.common.Slice vertex_slice:
the vertex slice
- :param int data_n_time_steps: how many time steps to run this time
:rtype: None
"""
spec.switch_write_focus(neuron_recording_region)
- spec.write_array(get_recording_header_array(
- self._get_buffered_sdram(vertex_slice, data_n_time_steps)))
# Write the number of variables and bitfields (ignore per-timestep)
n_vars = len(self.__sampling_rates) - len(self.__bitfield_variables)
@@ -1080,7 +1072,7 @@ def get_sampling_overflow_sdram(self, vertex_slice):
def get_buffered_sdram(
self, variable, vertex_slice, n_machine_time_steps):
- """ Returns the SDRAM used for this may time steps
+ """ Returns the SDRAM used for this many time steps for a variable
If required the total is rounded up so the space will always fit
@@ -1119,8 +1111,9 @@ def __n_bytes_to_n_words(n_bytes):
"""
return (n_bytes + (BYTES_PER_WORD - 1)) // BYTES_PER_WORD
- def get_sdram_usage_in_bytes(self, vertex_slice):
- """
+ def get_metadata_sdram_usage_in_bytes(self, vertex_slice):
+ """ Get the SDRAM usage of the metadata for recording
+
:param ~pacman.model.graphs.common.Slice vertex_slice:
:rtype: int
"""
@@ -1154,36 +1147,6 @@ def _get_fixed_sdram_usage(self, vertex_slice):
fixed_sdram += self._N_BYTES_PER_INDEX * vertex_slice.n_atoms
return fixed_sdram
- def get_exact_static_sdram_usage(self, vertex_slice):
- """ gets the exact sdram needed by the dsg region.
- :param ~pacman.model.graphs.common.Slice vertex_slice:
- :rtype: int
-
- NOTE: does not take into account the struct that's being allocated
- by the c code
- """
- n_record = (
- len(self.__sampling_rates) + len(self.__events_per_core_variables)
- + len(self.__per_timestep_variables))
- sdram = (
- get_recording_header_size(n_record) +
- self.get_sdram_usage_in_bytes(vertex_slice))
- return int(sdram)
-
- def get_static_sdram_usage(self, vertex_slice):
- """
- :param ~pacman.model.graphs.common.Slice vertex_slice:
- :rtype: int
- """
- n_record = (
- len(self.__sampling_rates) + len(self.__events_per_core_variables)
- + len(self.__per_timestep_variables))
- sdram = (
- get_recording_header_size(n_record) +
- get_recording_data_constant_size(n_record) +
- self.get_sdram_usage_in_bytes(vertex_slice))
- return int(sdram)
-
def get_variable_sdram_usage(self, vertex_slice):
"""
:param ~pacman.model.graphs.common.Slice vertex_slice:
@@ -1222,7 +1185,7 @@ def get_dtcm_usage_in_bytes(self, vertex_slice):
"""
# Note: Per-timestep variables uses no DTCM
# *_rate + n_neurons_recording_* + *_indexes
- usage = self.get_sdram_usage_in_bytes(vertex_slice)
+ usage = self.get_metadata_sdram_usage_in_bytes(vertex_slice)
# *_count + *_increment
usage += (len(self.__sampling_rates) * (
diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py
index 081970761f..db67f77bdc 100644
--- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py
+++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py
@@ -140,7 +140,7 @@ def _param_generator_params(values, seed):
if numpy.isscalar(values):
return numpy.array(
[DataType.S1615.encode_as_int(values)],
- dtype="uint32")
+ dtype=numpy.uint32)
if isinstance(values, RandomDistribution):
parameters = (
@@ -153,7 +153,7 @@ def _param_generator_params(values, seed):
params = [
DataType.S1615.encode_as_int(param) for param in parameters]
params.extend(seed)
- return numpy.array(params, dtype="uint32")
+ return numpy.array(params, dtype=numpy.uint32)
raise ValueError("Unexpected value {}".format(values))
diff --git a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py
index bdd3cb11f4..783c91a4dd 100644
--- a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py
+++ b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py
@@ -137,8 +137,14 @@ def _update_synapses_per_post_vertex(self, pre_slices, post_slices):
pre.n_atoms * post.n_atoms / float(n_connections)
if pre and post else 0
for pre in pre_slices for post in post_slices]
- self.__synapses_per_edge = self.get_rng_next(
- self.__num_synapses, prob_connect)
+ # Use the multinomial directly if possible
+ if (hasattr(self._rng, "rng") and
+ hasattr(self._rng.rng, "multinomial")):
+ self.__synapses_per_edge = self._rng.rng.multinomial(
+ self.__num_synapses, prob_connect)
+ else:
+ self.__synapses_per_edge = self.get_rng_next(
+ self.__num_synapses, prob_connect)
if sum(self.__synapses_per_edge) != self.__num_synapses:
raise SpynnakerException("{} of {} synapses generated".format(
sum(self.__synapses_per_edge), self.__num_synapses))
@@ -320,30 +326,27 @@ def gen_connector_params(
if synapse_info.prepop_is_view:
pre_size, pre_view_lo, pre_view_hi = self._get_connection_param(
synapse_info.pre_population._indexes, pre_vertex_slice)
+ post_size, post_view_lo, post_view_hi = self._get_connection_param(
+ synapse_info.post_population._indexes, post_vertex_slice)
+
+ # only select the relevant pre- and post-slices
+ view_pre_slices = self._get_view_slices(
+ pre_slices, pre_view_lo, pre_view_hi)
+ view_post_slices = self._get_view_slices(
+ post_slices, post_view_lo, post_view_hi)
else:
pre_size = pre_vertex_slice.n_atoms
pre_view_lo = 0
pre_view_hi = synapse_info.n_pre_neurons - 1
-
- params.extend([pre_view_lo, pre_view_hi])
-
- if synapse_info.postpop_is_view:
- post_size, post_view_lo, post_view_hi = self._get_connection_param(
- synapse_info.post_population._indexes, post_vertex_slice)
- else:
post_size = post_vertex_slice.n_atoms
post_view_lo = 0
post_view_hi = synapse_info.n_post_neurons - 1
+ view_pre_slices = pre_slices
+ view_post_slices = post_slices
+ params.extend([pre_view_lo, pre_view_hi])
params.extend([post_view_lo, post_view_hi])
- # only select the relevant pre- and post-slices
- view_pre_slices = self._get_view_slices(
- pre_slices, pre_view_lo, pre_view_hi)
-
- view_post_slices = self._get_view_slices(
- post_slices, post_view_lo, post_view_hi)
-
self._update_synapses_per_post_vertex(
view_pre_slices, view_post_slices)
@@ -354,7 +357,7 @@ def gen_connector_params(
pre_size * post_size])
params.extend(self._get_connector_seed(
pre_vertex_slice, post_vertex_slice, self._rng))
- return numpy.array(params, dtype="uint32")
+ return numpy.array(params, dtype=numpy.uint32)
@property
@overrides(
diff --git a/spynnaker/pyNN/models/neural_projections/delayed_application_edge.py b/spynnaker/pyNN/models/neural_projections/delayed_application_edge.py
index 0543463fc7..24c22e686e 100644
--- a/spynnaker/pyNN/models/neural_projections/delayed_application_edge.py
+++ b/spynnaker/pyNN/models/neural_projections/delayed_application_edge.py
@@ -16,6 +16,8 @@
from spinn_utilities.overrides import overrides
from pacman.model.graphs.application import ApplicationEdge
from pacman.model.partitioner_interfaces import AbstractSlicesConnect
+from spynnaker.pyNN.models.neural_projections\
+ .projection_application_edge import are_dynamics_structural
class DelayedApplicationEdge(ApplicationEdge, AbstractSlicesConnect):
@@ -95,6 +97,9 @@ def get_machine_edge(self, pre_vertex, post_vertex):
@overrides(AbstractSlicesConnect.could_connect)
def could_connect(self, pre_slice, post_slice):
for synapse_info in self.__synapse_information:
+ # Structual Plasticity can learn connection not originally included
+ if are_dynamics_structural(synapse_info.synapse_dynamics):
+ return True
if synapse_info.connector.could_connect(
synapse_info, pre_slice, post_slice):
return True
diff --git a/spynnaker/pyNN/models/neural_projections/projection_application_edge.py b/spynnaker/pyNN/models/neural_projections/projection_application_edge.py
index 5ea5e85edc..62d2082b0e 100644
--- a/spynnaker/pyNN/models/neural_projections/projection_application_edge.py
+++ b/spynnaker/pyNN/models/neural_projections/projection_application_edge.py
@@ -22,7 +22,7 @@
_DynamicsStructural = None
-def _are_dynamics_structural(synapse_dynamics):
+def are_dynamics_structural(synapse_dynamics):
global _DynamicsStructural
if _DynamicsStructural is None:
# Avoid import loop by postponing this import
@@ -46,7 +46,8 @@ class ProjectionApplicationEdge(
"__post_slices",
# True if slices have been convered to sorted lists
"__slices_list_mode",
- "__machine_edges_by_slices"
+ "__machine_edges_by_slices",
+ "__filter"
]
def __init__(
@@ -80,6 +81,9 @@ def __init__(
self.__post_slices = set()
self.__slices_list_mode = False
+ # By default, allow filtering
+ self.__filter = True
+
def add_synapse_information(self, synapse_information):
"""
:param SynapseInformation synapse_information:
@@ -105,6 +109,13 @@ def delay_edge(self):
def delay_edge(self, delay_edge):
self.__delay_edge = delay_edge
+ def set_filter(self, do_filter):
+ """ Set the ability to filter or not
+
+ @param bool do_filter: Whether to allow filtering
+ """
+ self.__filter = do_filter
+
@property
def n_delay_stages(self):
"""
@@ -128,9 +139,11 @@ def get_machine_edge(self, pre_vertex, post_vertex):
@overrides(AbstractSlicesConnect.could_connect)
def could_connect(self, pre_slice, post_slice):
+ if not self.__filter:
+ return False
for synapse_info in self.__synapse_information:
# Structual Plasticity can learn connection not originally included
- if _are_dynamics_structural(synapse_info.synapse_dynamics):
+ if are_dynamics_structural(synapse_info.synapse_dynamics):
return True
if synapse_info.connector.could_connect(
synapse_info, pre_slice, post_slice):
diff --git a/spynnaker/pyNN/models/neural_projections/synapse_information.py b/spynnaker/pyNN/models/neural_projections/synapse_information.py
index 0e502535a5..db2c390ba8 100644
--- a/spynnaker/pyNN/models/neural_projections/synapse_information.py
+++ b/spynnaker/pyNN/models/neural_projections/synapse_information.py
@@ -15,9 +15,9 @@
from pyNN.random import NumpyRNG
from spynnaker.pyNN.models.neural_projections.connectors import (
- AbstractGenerateConnectorOnMachine)
+ AbstractGenerateConnectorOnMachine, OneToOneConnector)
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
- AbstractGenerateOnMachine)
+ AbstractGenerateOnMachine, SynapseDynamicsStatic)
class SynapseInformation(object):
@@ -57,7 +57,7 @@ def __init__(self, connector, pre_population, post_population,
:type rng: ~pyNN.random.NumpyRNG or None
:param AbstractSynapseDynamics synapse_dynamics:
The dynamic behaviour of the synapse
- :param AbstractSynapseType synapse_type: The type of the synapse
+ :param int synapse_type: The type of the synapse
:param bool is_virtual_machine: Whether the machine is virtual
:param weights: The synaptic weights
:type weights: float or list(float) or ~numpy.ndarray(float) or None
@@ -157,7 +157,7 @@ def synapse_dynamics(self):
def synapse_type(self):
""" The type of the synapse
- :rtype: AbstractSynapseType
+ :rtype: int
"""
return self.__synapse_type
@@ -199,6 +199,18 @@ def may_generate_on_machine(self):
self.synapse_dynamics.generate_on_machine())
return connector_gen and synapse_gen
+ def may_use_direct_matrix(self):
+ """ Do the properties of the synaptic information allow it to use the
+ direct matrix?
+
+ :rtype: bool
+ """
+ return (
+ isinstance(self.__connector, OneToOneConnector) and
+ isinstance(self.__synapse_dynamics,
+ SynapseDynamicsStatic) and
+ not self.prepop_is_view and not self.postpop_is_view)
+
@property
def pre_run_connection_holders(self):
""" The list of connection holders to be filled in before run
diff --git a/spynnaker/pyNN/models/neuron/__init__.py b/spynnaker/pyNN/models/neuron/__init__.py
index 7b96f90ab8..68f7643497 100644
--- a/spynnaker/pyNN/models/neuron/__init__.py
+++ b/spynnaker/pyNN/models/neuron/__init__.py
@@ -15,12 +15,25 @@
from .abstract_population_vertex import AbstractPopulationVertex
from .connection_holder import ConnectionHolder
-from .population_machine_vertex import PopulationMachineVertex
-from .synaptic_manager import SynapticManager
+from .population_machine_vertex import (
+ PopulationMachineVertex, SpikeProcessingProvenance)
+from .population_neurons_machine_vertex import PopulationNeuronsMachineVertex
+from .population_machine_neurons import NeuronProvenance
+from .population_synapses_machine_vertex_lead import (
+ PopulationSynapsesMachineVertexLead)
+from .population_synapses_machine_vertex_shared import (
+ PopulationSynapsesMachineVertexShared)
+from .population_synapses_machine_vertex_common import (
+ PopulationSynapsesMachineVertexCommon, SpikeProcessingFastProvenance)
+from .population_machine_synapses_provenance import SynapseProvenance
from .abstract_pynn_neuron_model import AbstractPyNNNeuronModel
from .abstract_pynn_neuron_model_standard import (
AbstractPyNNNeuronModelStandard)
__all__ = ["AbstractPopulationVertex", "AbstractPyNNNeuronModel",
"AbstractPyNNNeuronModelStandard", "ConnectionHolder",
- "PopulationMachineVertex", "SynapticManager"]
+ "PopulationMachineVertex", "PopulationNeuronsMachineVertex",
+ "NeuronProvenance", "PopulationSynapsesMachineVertexCommon",
+ "PopulationSynapsesMachineVertexLead",
+ "PopulationSynapsesMachineVertexShared", "SynapseProvenance",
+ "SpikeProcessingProvenance", "SpikeProcessingFastProvenance"]
diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py
index 484ff6af07..40b8fde7a6 100644
--- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py
+++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py
@@ -14,34 +14,71 @@
# along with this program. If not, see .
import logging
+import math
+import numpy
+from scipy import special # @UnresolvedImport
+
from spinn_utilities.log import FormatAdapter
from spinn_utilities.overrides import overrides
+from spinn_utilities.progress_bar import ProgressBar
+from data_specification.enums.data_type import DataType
from pacman.model.constraints.key_allocator_constraints import (
ContiguousKeyRangeContraint)
-from spinn_utilities.config_holder import get_config_int
+from spinn_utilities.config_holder import (
+ get_config_int, get_config_float, get_config_bool)
+from pacman.model.resources import MultiRegionSDRAM
from spinn_front_end_common.abstract_models import (
AbstractChangableAfterRun, AbstractProvidesOutgoingPartitionConstraints,
AbstractCanReset, AbstractRewritesDataSpecification)
from spinn_front_end_common.abstract_models.impl import (
ProvidesKeyToAtomMappingImpl, TDMAAwareApplicationVertex)
-from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
+from spinn_front_end_common.utilities.constants import (
+ BYTES_PER_WORD, MICRO_TO_SECOND_CONVERSION, SYSTEM_BYTES_REQUIREMENT)
+from spinn_front_end_common.utilities.exceptions import ConfigurationException
+from spinn_front_end_common.interface.profiling.profile_utils import (
+ get_profile_region_size)
+from spinn_front_end_common.interface.buffer_management\
+ .recording_utilities import (
+ get_recording_header_size, get_recording_data_constant_size)
+from spinn_front_end_common.interface.provenance import (
+ ProvidesProvenanceDataFromMachineImpl)
+from spinn_front_end_common.utilities.globals_variables import (
+ machine_time_step)
+
from spynnaker.pyNN.models.common import (
AbstractSpikeRecordable, AbstractNeuronRecordable, AbstractEventRecordable,
NeuronRecorder)
from spynnaker.pyNN.models.abstract_models import (
AbstractPopulationInitializable, AbstractAcceptsIncomingSynapses,
- AbstractPopulationSettable, AbstractContainsUnits)
+ AbstractPopulationSettable, AbstractContainsUnits, AbstractMaxSpikes,
+ HasSynapses)
from spynnaker.pyNN.exceptions import InvalidParameterType
from spynnaker.pyNN.utilities.ranged import (
SpynnakerRangeDictionary)
-from .synaptic_manager import SynapticManager
+from spynnaker.pyNN.utilities.constants import POSSION_SIGMA_SUMMATION_LIMIT
+from spynnaker.pyNN.utilities.running_stats import RunningStats
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ AbstractSynapseDynamics, AbstractSynapseDynamicsStructural)
+from .synapse_io import get_max_row_info
+from .master_pop_table import MasterPopTableAsBinarySearch
+from .generator_data import GeneratorData
+from .synaptic_matrices import SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES
+
+logger = FormatAdapter(logging.getLogger(__name__))
# TODO: Make sure these values are correct (particularly CPU cycles)
_NEURON_BASE_DTCM_USAGE_IN_BYTES = 9 * BYTES_PER_WORD
_NEURON_BASE_N_CPU_CYCLES_PER_NEURON = 22
_NEURON_BASE_N_CPU_CYCLES = 10
-logger = FormatAdapter(logging.getLogger(__name__))
+# 1 for number of neurons
+# 1 for number of synapse types
+# 1 for number of neuron bits
+# 1 for number of synapse type bits
+# 1 for number of delay bits
+# 1 for drop late packets,
+# 1 for incoming spike buffer size
+_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES = 7 * BYTES_PER_WORD
class AbstractPopulationVertex(
@@ -56,6 +93,7 @@ class AbstractPopulationVertex(
"""
__slots__ = [
+ "__all_single_syn_sz",
"__change_requires_mapping",
"__change_requires_data_generation",
"__incoming_spike_buffer_size",
@@ -63,15 +101,20 @@ class AbstractPopulationVertex(
"__n_profile_samples",
"__neuron_impl",
"__neuron_recorder",
+ "__synapse_recorder",
"_parameters", # See AbstractPyNNModel
"__pynn_model",
"_state_variables", # See AbstractPyNNModel
- "__synapse_manager",
- "__time_between_requests",
- "__units",
- "__n_data_specs",
"__initial_state_variables",
- "__has_run"]
+ "__has_run",
+ "__updated_state_variables",
+ "__ring_buffer_sigma",
+ "__spikes_per_second",
+ "__drop_late_spikes",
+ "__incoming_projections",
+ "__synapse_dynamics",
+ "__max_row_info",
+ "__self_projection"]
#: recording region IDs
_SPIKE_RECORDING_REGION = 0
@@ -82,9 +125,14 @@ class AbstractPopulationVertex(
#: The Buffer traffic type
_TRAFFIC_IDENTIFIER = "BufferTraffic"
+ _C_MAIN_BASE_N_CPU_CYCLES = 0
+ _NEURON_BASE_N_CPU_CYCLES_PER_NEURON = 22
+ _NEURON_BASE_N_CPU_CYCLES = 10
+ _SYNAPSE_BASE_N_CPU_CYCLES_PER_NEURON = 22
+ _SYNAPSE_BASE_N_CPU_CYCLES = 10
+
# 5 elements before the start of global parameters
- # 1. has key, 2. key, 3. n atoms,
- # 4. n synapse types, 5. incoming spike buffer size.
+ # 1. has key, 2. key, 3. n atoms, 4. n_atoms_peak 5. n_synapse_types
BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 5 * BYTES_PER_WORD
def __init__(
@@ -121,7 +169,6 @@ def __init__(
super().__init__(label, constraints, max_atoms_per_core, splitter)
self.__n_atoms = self.round_n_atoms(n_neurons, "n_neurons")
- self.__n_data_specs = 0
# buffer data
self.__incoming_spike_buffer_size = incoming_spike_buffer_size
@@ -130,6 +177,25 @@ def __init__(
self.__incoming_spike_buffer_size = get_config_int(
"Simulation", "incoming_spike_buffer_size")
+ # Limit the DTCM used by one-to-one connections
+ self.__all_single_syn_sz = get_config_int(
+ "Simulation", "one_to_one_connection_dtcm_max_bytes")
+
+ self.__ring_buffer_sigma = ring_buffer_sigma
+ if self.__ring_buffer_sigma is None:
+ self.__ring_buffer_sigma = get_config_float(
+ "Simulation", "ring_buffer_sigma")
+
+ self.__spikes_per_second = spikes_per_second
+ if self.__spikes_per_second is None:
+ self.__spikes_per_second = get_config_float(
+ "Simulation", "spikes_per_second")
+
+ self.__drop_late_spikes = drop_late_spikes
+ if self.__drop_late_spikes is None:
+ self.__drop_late_spikes = get_config_bool(
+ "Simulation", "drop_late_spikes")
+
self.__neuron_impl = neuron_impl
self.__pynn_model = pynn_model
self._parameters = SpynnakerRangeDictionary(n_neurons)
@@ -139,22 +205,20 @@ def __init__(
self._state_variables = self.__initial_state_variables.copy()
# Set up for recording
- recordable_variables = list(
+ neuron_recordable_variables = list(
self.__neuron_impl.get_recordable_variables())
record_data_types = dict(
self.__neuron_impl.get_recordable_data_types())
self.__neuron_recorder = NeuronRecorder(
- recordable_variables, record_data_types, [NeuronRecorder.SPIKES],
+ neuron_recordable_variables, record_data_types,
+ [NeuronRecorder.SPIKES], n_neurons, [], {}, [], {})
+ self.__synapse_recorder = NeuronRecorder(
+ [], {}, [],
n_neurons, [NeuronRecorder.PACKETS],
{NeuronRecorder.PACKETS: NeuronRecorder.PACKETS_TYPE},
[NeuronRecorder.REWIRING],
{NeuronRecorder.REWIRING: NeuronRecorder.REWIRING_TYPE})
- # Set up synapse handling
- self.__synapse_manager = SynapticManager(
- self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
- spikes_per_second, drop_late_spikes)
-
# bool for if state has changed.
self.__change_requires_mapping = True
self.__change_requires_data_generation = False
@@ -164,44 +228,147 @@ def __init__(
self.__n_profile_samples = get_config_int(
"Reports", "n_profile_samples")
- @overrides(AbstractNeuronRecordable.get_expected_n_rows)
- def get_expected_n_rows(
- self, n_machine_time_steps, sampling_rate, vertex, variable):
- return self.__neuron_recorder.expected_rows_for_a_run_time(
- n_machine_time_steps, sampling_rate)
+ # Set up for incoming
+ self.__incoming_projections = list()
+ self.__max_row_info = dict()
+ self.__self_projection = None
+
+ # Prepare for dealing with STDP - there can only be one (non-static)
+ # synapse dynamics per vertex at present
+ self.__synapse_dynamics = None
+
+ @property
+ def synapse_dynamics(self):
+ """ The synapse dynamics used by the synapses e.g. plastic or static.
+ Settable.
+
+ :rtype: AbstractSynapseDynamics or None
+ """
+ return self.__synapse_dynamics
+
+ @synapse_dynamics.setter
+ def synapse_dynamics(self, synapse_dynamics):
+ """ Set the synapse dynamics. Note that after setting, the dynamics
+ might not be the type set as it can be combined with the existing
+ dynamics in exciting ways.
+
+ :param AbstractSynapseDynamics synapse_dynamics:
+ The synapse dynamics to set
+ """
+ if self.__synapse_dynamics is None:
+ self.__synapse_dynamics = synapse_dynamics
+ else:
+ self.__synapse_dynamics = self.__synapse_dynamics.merge(
+ synapse_dynamics)
+
+ def add_incoming_projection(self, projection):
+ """ Add a projection incoming to this vertex
+
+ :param PyNNProjectionCommon projection:
+ The new projection to add
+ """
+ # Reset the ring buffer shifts as a projection has been added
+ self.__change_requires_mapping = True
+ self.__incoming_projections.append(projection)
+ if projection._projection_edge.pre_vertex == self:
+ self.__self_projection = projection
+
+ @property
+ def self_projection(self):
+ """ Get any projection from this vertex to itself
+
+ :rtype: PyNNProjectionCommon or None
+ """
+ return self.__self_projection
@property
@overrides(TDMAAwareApplicationVertex.n_atoms)
def n_atoms(self):
return self.__n_atoms
+ @overrides(TDMAAwareApplicationVertex.get_n_cores)
+ def get_n_cores(self):
+ return len(self._splitter.get_out_going_slices()[0])
+
+ @property
+ def size(self):
+ """ The number of neurons in the vertex
+
+ :rtype: int
+ """
+ return self.__n_atoms
+
+ @property
+ def all_single_syn_size(self):
+ """ The maximum amount of DTCM to use for single synapses
+
+ :rtype: int
+ """
+ return self.__all_single_syn_sz
+
@property
def incoming_spike_buffer_size(self):
+ """ The size of the incoming spike buffer to be used on the cores
+
+ :rtype: int
+ """
return self.__incoming_spike_buffer_size
@property
def parameters(self):
+ """ The parameters of the neurons in the population
+
+ :rtype: SpyNNakerRangeDictionary
+ """
return self._parameters
@property
def state_variables(self):
+ """ The state variables of the neuron in the population
+
+ :rtype: SpyNNakerRangeDicationary
+ """
return self._state_variables
@property
def neuron_impl(self):
+ """ The neuron implementation
+
+ :rtype: AbstractNeuronImpl
+ """
return self.__neuron_impl
@property
def n_profile_samples(self):
+ """ The maximum number of profile samples to report
+
+ :rtype: int
+ """
return self.__n_profile_samples
@property
def neuron_recorder(self):
+ """ The recorder for neurons
+
+ :rtype: NeuronRecorder
+ """
return self.__neuron_recorder
@property
- def synapse_manager(self):
- return self.__synapse_manager
+ def synapse_recorder(self):
+ """ The recorder for synapses
+
+ :rtype: SynapseRecorder
+ """
+ return self.__synapse_recorder
+
+ @property
+ def drop_late_spikes(self):
+ """ Whether spikes should be dropped if not processed in a timestep
+
+ :rtype: bool
+ """
+ return self.__drop_late_spikes
def set_has_run(self):
""" Set the flag has run so initialize only affects state variables
@@ -222,9 +389,6 @@ def requires_data_generation(self):
@overrides(AbstractChangableAfterRun.mark_no_changes)
def mark_no_changes(self):
- # If mapping will happen, reset things that need this
- if self.__change_requires_mapping:
- self.__synapse_manager.clear_all_caches()
self.__change_requires_mapping = False
self.__change_requires_data_generation = False
@@ -237,6 +401,7 @@ def get_sdram_usage_for_neuron_params(self, vertex_slice):
"""
return (
self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS +
+ (self.__neuron_impl.get_n_synapse_types() * BYTES_PER_WORD) +
self.tdma_sdram_size_in_bytes +
self.__neuron_impl.get_sdram_usage_in_bytes(vertex_slice.n_atoms))
@@ -252,7 +417,7 @@ def set_recording_spikes(
@overrides(AbstractEventRecordable.is_recording_events)
def is_recording_events(self, variable):
- return self.__neuron_recorder.is_recording(variable)
+ return self.__synapse_recorder.is_recording(variable)
@overrides(AbstractEventRecordable.set_recording_events)
def set_recording_events(
@@ -270,35 +435,69 @@ def get_spikes(self, placements, buffer_manager):
@overrides(AbstractEventRecordable.get_events)
def get_events(
self, variable, placements, buffer_manager):
- return self.__neuron_recorder.get_events(
+ return self.__synapse_recorder.get_events(
self.label, buffer_manager, placements, self, variable)
@overrides(AbstractNeuronRecordable.get_recordable_variables)
def get_recordable_variables(self):
- return self.__neuron_recorder.get_recordable_variables()
+ variables = list()
+ variables.extend(self.__neuron_recorder.get_recordable_variables())
+ variables.extend(self.__synapse_recorder.get_recordable_variables())
+ return variables
+
+ def __raise_var_not_supported(self, variable):
+ """ Helper to indicate that recording a variable is not supported
+
+ :param str variable: The variable to report as unsupported
+ """
+ msg = ("Variable {} is not supported. Supported variables are"
+ "{}".format(variable, self.get_recordable_variables()))
+ raise ConfigurationException(msg)
@overrides(AbstractNeuronRecordable.is_recording)
def is_recording(self, variable):
- return self.__neuron_recorder.is_recording(variable)
+ if self.__neuron_recorder.is_recordable(variable):
+ return self.__neuron_recorder.is_recording(variable)
+ if self.__synapse_recorder.is_recordable(variable):
+ return self.__synapse_recorder.is_recording(variable)
+ self.__raise_var_not_supported(variable)
@overrides(AbstractNeuronRecordable.set_recording)
def set_recording(self, variable, new_state=True, sampling_interval=None,
indexes=None):
- self.__neuron_recorder.set_recording(
- variable, new_state, sampling_interval, indexes)
+ if self.__neuron_recorder.is_recordable(variable):
+ self.__neuron_recorder.set_recording(
+ variable, new_state, sampling_interval, indexes)
+ elif self.__synapse_recorder.is_recordable(variable):
+ self.__synapse_recorder.set_recording(
+ variable, new_state, sampling_interval, indexes)
+ else:
+ self.__raise_var_not_supported(variable)
self.__change_requires_mapping = not self.is_recording(variable)
@overrides(AbstractNeuronRecordable.get_data)
def get_data(
self, variable, n_machine_time_steps, placements, buffer_manager):
# pylint: disable=too-many-arguments
- return self.__neuron_recorder.get_matrix_data(
- self.label, buffer_manager, placements, self, variable,
- n_machine_time_steps)
+ if self.__neuron_recorder.is_recordable(variable):
+ return self.__neuron_recorder.get_matrix_data(
+ self.label, buffer_manager, placements, self, variable,
+ n_machine_time_steps)
+ elif self.__synapse_recorder.is_recordable(variable):
+ return self.__synapse_recorder.get_matrix_data(
+ self.label, buffer_manager, placements, self, variable,
+ n_machine_time_steps)
+ self.__raise_var_not_supported(variable)
@overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
def get_neuron_sampling_interval(self, variable):
- return self.__neuron_recorder.get_neuron_sampling_interval(variable)
+ if self.__neuron_recorder.is_recordable(variable):
+ return self.__neuron_recorder.get_neuron_sampling_interval(
+ variable)
+ elif self.__synapse_recorder.is_recordable(variable):
+ return self.__synapse_recorder.get_neuron_sampling_interval(
+ variable)
+ self.__raise_var_not_supported(variable)
@overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
def get_spikes_sampling_interval(self):
@@ -339,6 +538,10 @@ def initialize_parameters(self):
return self.__pynn_model.default_initial_values.keys()
def _get_parameter(self, variable):
+ """ Get a neuron parameter value
+
+ :param str variable: The variable to get the value of
+ """
if variable.endswith("_init"):
# method called with "V_init"
key = variable[:-5]
@@ -409,48 +612,34 @@ def weight_scale(self):
@property
def ring_buffer_sigma(self):
- return self.__synapse_manager.ring_buffer_sigma
+ return self.__ring_buffer_sigma
@ring_buffer_sigma.setter
def ring_buffer_sigma(self, ring_buffer_sigma):
- self.__synapse_manager.ring_buffer_sigma = ring_buffer_sigma
-
- def reset_ring_buffer_shifts(self):
- self.__synapse_manager.reset_ring_buffer_shifts()
+ self.__ring_buffer_sigma = ring_buffer_sigma
@property
def spikes_per_second(self):
- return self.__synapse_manager.spikes_per_second
+ return self.__spikes_per_second
@spikes_per_second.setter
def spikes_per_second(self, spikes_per_second):
- self.__synapse_manager.spikes_per_second = spikes_per_second
-
- @property
- def synapse_dynamics(self):
- """
- :rtype: AbstractSynapseDynamics
- """
- return self.__synapse_manager.synapse_dynamics
+ self.__spikes_per_second = spikes_per_second
def set_synapse_dynamics(self, synapse_dynamics):
- """
+ """ Set the synapse dynamics of this population
+
:param AbstractSynapseDynamics synapse_dynamics:
+ The synapse dynamics to set
"""
- self.__synapse_manager.synapse_dynamics = synapse_dynamics
- # If we are setting a synapse dynamics, we must remap even if the
- # change above means we don't have to
- self.__change_requires_mapping = True
-
- @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
- def get_connections_from_machine(
- self, transceiver, placements, app_edge, synapse_info):
- # pylint: disable=too-many-arguments
- return self.__synapse_manager.get_connections_from_machine(
- transceiver, placements, app_edge, synapse_info)
+ self.synapse_dynamics = synapse_dynamics
def clear_connection_cache(self):
- self.__synapse_manager.clear_connection_cache()
+ """ Flush the cache of connection information; needed for a second run
+ """
+ for post_vertex in self.machine_vertices:
+ if isinstance(post_vertex, HasSynapses):
+ post_vertex.clear_connection_cache()
@overrides(AbstractProvidesOutgoingPartitionConstraints.
get_outgoing_partition_constraints)
@@ -537,6 +726,10 @@ def describe(self):
return context
def get_synapse_id_by_target(self, target):
+ """ Get the id of synapse using its target name
+
+ :param str target: The synapse to get the id of
+ """
return self.__neuron_impl.get_synapse_id_by_target(target)
def __str__(self):
@@ -555,8 +748,544 @@ def reset_to_first_timestep(self):
vertex.set_reload_required(True)
# If synapses change during the run,
- if self.__synapse_manager.changes_during_run:
+ if (self.__synapse_dynamics is not None and
+ self.__synapse_dynamics.changes_during_run):
self.__change_requires_data_generation = True
for vertex in self.machine_vertices:
if isinstance(vertex, AbstractRewritesDataSpecification):
vertex.set_reload_required(True)
+
+ @staticmethod
+ def _ring_buffer_expected_upper_bound(
+ weight_mean, weight_std_dev, spikes_per_second,
+ n_synapses_in, sigma):
+ """ Provides expected upper bound on accumulated values in a ring\
+ buffer element.
+
+ Requires an assessment of maximum Poisson input rate.
+
+ Assumes knowledge of mean and SD of weight distribution, fan-in\
+ and timestep.
+
+ All arguments should be assumed real values except n_synapses_in\
+ which will be an integer.
+
+ :param float weight_mean: Mean of weight distribution (in either nA or\
+ microSiemens as required)
+ :param float weight_std_dev: SD of weight distribution
+ :param float spikes_per_second: Maximum expected Poisson rate in Hz
+ :param int machine_timestep: in us
+ :param int n_synapses_in: No of connected synapses
+ :param float sigma: How many SD above the mean to go for upper bound;\
+ a good starting choice is 5.0. Given length of simulation we can\
+ set this for approximate number of saturation events.
+ :rtype: float
+ """
+ # E[ number of spikes ] in a timestep
+ steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_time_step()
+ average_spikes_per_timestep = (
+ float(n_synapses_in * spikes_per_second) / steps_per_second)
+
+ # Exact variance contribution from inherent Poisson variation
+ poisson_variance = average_spikes_per_timestep * (weight_mean ** 2)
+
+ # Upper end of range for Poisson summation required below
+ # upper_bound needs to be an integer
+ upper_bound = int(round(average_spikes_per_timestep +
+ POSSION_SIGMA_SUMMATION_LIMIT *
+ math.sqrt(average_spikes_per_timestep)))
+
+ # Closed-form exact solution for summation that gives the variance
+ # contributed by weight distribution variation when modulated by
+ # Poisson PDF. Requires scipy.special for gamma and incomplete gamma
+ # functions. Beware: incomplete gamma doesn't work the same as
+ # Mathematica because (1) it's regularised and needs a further
+ # multiplication and (2) it's actually the complement that is needed
+ # i.e. 'gammaincc']
+
+ weight_variance = 0.0
+
+ if weight_std_dev > 0:
+ # pylint: disable=no-member
+ lngamma = special.gammaln(1 + upper_bound)
+ gammai = special.gammaincc(
+ 1 + upper_bound, average_spikes_per_timestep)
+
+ big_ratio = (math.log(average_spikes_per_timestep) * upper_bound -
+ lngamma)
+
+ if -701.0 < big_ratio < 701.0 and big_ratio != 0.0:
+ log_weight_variance = (
+ -average_spikes_per_timestep +
+ math.log(average_spikes_per_timestep) +
+ 2.0 * math.log(weight_std_dev) +
+ math.log(math.exp(average_spikes_per_timestep) * gammai -
+ math.exp(big_ratio)))
+ weight_variance = math.exp(log_weight_variance)
+
+ # upper bound calculation -> mean + n * SD
+ return ((average_spikes_per_timestep * weight_mean) +
+ (sigma * math.sqrt(poisson_variance + weight_variance)))
+
+ def get_ring_buffer_shifts(self, incoming_projections):
+ """ Get the shift of the ring buffers for transfer of values into the
+ input buffers for this model.
+
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to consider in the calculations
+ :rtype: list(int)
+ """
+ weight_scale = self.__neuron_impl.get_global_weight_scale()
+ weight_scale_squared = weight_scale * weight_scale
+ n_synapse_types = self.__neuron_impl.get_n_synapse_types()
+ running_totals = [RunningStats() for _ in range(n_synapse_types)]
+ delay_running_totals = [RunningStats() for _ in range(n_synapse_types)]
+ total_weights = numpy.zeros(n_synapse_types)
+ biggest_weight = numpy.zeros(n_synapse_types)
+ weights_signed = False
+ rate_stats = [RunningStats() for _ in range(n_synapse_types)]
+ steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_time_step()
+
+ for proj in incoming_projections:
+ synapse_info = proj._synapse_information
+ synapse_type = synapse_info.synapse_type
+ synapse_dynamics = synapse_info.synapse_dynamics
+ connector = synapse_info.connector
+
+ weight_mean = (
+ synapse_dynamics.get_weight_mean(
+ connector, synapse_info) * weight_scale)
+ n_connections = \
+ connector.get_n_connections_to_post_vertex_maximum(
+ synapse_info)
+ weight_variance = synapse_dynamics.get_weight_variance(
+ connector, synapse_info.weights,
+ synapse_info) * weight_scale_squared
+ running_totals[synapse_type].add_items(
+ weight_mean, weight_variance, n_connections)
+
+ delay_variance = synapse_dynamics.get_delay_variance(
+ connector, synapse_info.delays, synapse_info)
+ delay_running_totals[synapse_type].add_items(
+ 0.0, delay_variance, n_connections)
+
+ weight_max = (synapse_dynamics.get_weight_maximum(
+ connector, synapse_info) * weight_scale)
+ biggest_weight[synapse_type] = max(
+ biggest_weight[synapse_type], weight_max)
+
+ spikes_per_tick = max(
+ 1.0, self.__spikes_per_second / steps_per_second)
+ spikes_per_second = self.__spikes_per_second
+ pre_vertex = proj._projection_edge.pre_vertex
+ if isinstance(pre_vertex, AbstractMaxSpikes):
+ rate = pre_vertex.max_spikes_per_second()
+ if rate != 0:
+ spikes_per_second = rate
+ spikes_per_tick = pre_vertex.max_spikes_per_ts()
+ rate_stats[synapse_type].add_items(
+ spikes_per_second, 0, n_connections)
+ total_weights[synapse_type] += spikes_per_tick * (
+ weight_max * n_connections)
+
+ if synapse_dynamics.are_weights_signed():
+ weights_signed = True
+
+ max_weights = numpy.zeros(n_synapse_types)
+ for synapse_type in range(n_synapse_types):
+ if delay_running_totals[synapse_type].variance == 0.0:
+ max_weights[synapse_type] = max(total_weights[synapse_type],
+ biggest_weight[synapse_type])
+ else:
+ stats = running_totals[synapse_type]
+ rates = rate_stats[synapse_type]
+ max_weights[synapse_type] = min(
+ self._ring_buffer_expected_upper_bound(
+ stats.mean, stats.standard_deviation, rates.mean,
+ stats.n_items, self.__ring_buffer_sigma),
+ total_weights[synapse_type])
+ max_weights[synapse_type] = max(
+ max_weights[synapse_type], biggest_weight[synapse_type])
+
+ # Convert these to powers; we could use int.bit_length() for this if
+ # they were integers, but they aren't...
+ max_weight_powers = (
+ 0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2))))
+ for w in max_weights)
+
+ # If 2^max_weight_power equals the max weight, we have to add another
+ # power, as range is 0 - (just under 2^max_weight_power)!
+ max_weight_powers = (
+ w + 1 if (2 ** w) <= a else w
+ for w, a in zip(max_weight_powers, max_weights))
+
+ # If we have synapse dynamics that uses signed weights,
+ # Add another bit of shift to prevent overflows
+ if weights_signed:
+ max_weight_powers = (m + 1 for m in max_weight_powers)
+
+ return list(max_weight_powers)
+
+ @staticmethod
+ def __get_weight_scale(ring_buffer_to_input_left_shift):
+ """ Return the amount to scale the weights by to convert them from \
+ floating point values to 16-bit fixed point numbers which can be \
+ shifted left by ring_buffer_to_input_left_shift to produce an\
+ s1615 fixed point number
+
+ :param int ring_buffer_to_input_left_shift:
+ :rtype: float
+ """
+ return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1)))
+
+ def get_weight_scales(self, ring_buffer_shifts):
+ """ Get the weight scaling to apply to weights in synapses
+
+ :param list(int) ring_buffer_shifts:
+ The shifts to convert to weight scales
+ :rtype: list(int)
+ """
+ weight_scale = self.__neuron_impl.get_global_weight_scale()
+ return numpy.array([
+ self.__get_weight_scale(r) * weight_scale
+ for r in ring_buffer_shifts])
+
+ @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
+ def get_connections_from_machine(
+ self, transceiver, placements, app_edge, synapse_info):
+ # Start with something in the list so that concatenate works
+ connections = [numpy.zeros(
+ 0, dtype=AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE)]
+ progress = ProgressBar(
+ len(self.machine_vertices),
+ "Getting synaptic data between {} and {}".format(
+ app_edge.pre_vertex.label, app_edge.post_vertex.label))
+ for post_vertex in progress.over(self.machine_vertices):
+ if isinstance(post_vertex, HasSynapses):
+ placement = placements.get_placement_of_vertex(post_vertex)
+ connections.extend(post_vertex.get_connections_from_machine(
+ transceiver, placement, app_edge, synapse_info))
+ return numpy.concatenate(connections)
+
+ def get_synapse_params_size(self):
+ """ Get the size of the synapse parameters in bytes
+
+ :rtype: int
+ """
+ return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES +
+ (BYTES_PER_WORD * self.__neuron_impl.get_n_synapse_types()))
+
+ def get_synapse_dynamics_size(self, vertex_slice):
+ """ Get the size of the synapse dynamics region
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the vertex to get the usage of
+ :rtype: int
+ """
+ if self.__synapse_dynamics is None:
+ return 0
+
+ return self.__synapse_dynamics.get_parameters_sdram_usage_in_bytes(
+ vertex_slice.n_atoms, self.__neuron_impl.get_n_synapse_types())
+
+ def get_structural_dynamics_size(self, vertex_slice, incoming_projections):
+ """ Get the size of the structural dynamics region
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the vertex to get the usage of
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to consider in the calculations
+ """
+ if self.__synapse_dynamics is None:
+ return 0
+
+ if not isinstance(
+ self.__synapse_dynamics, AbstractSynapseDynamicsStructural):
+ return 0
+
+ return self.__synapse_dynamics\
+ .get_structural_parameters_sdram_usage_in_bytes(
+ incoming_projections, vertex_slice.n_atoms)
+
+ def get_synapses_size(self, vertex_slice, incoming_projections):
+ """ Get the maximum SDRAM usage for the synapses on a vertex slice
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the vertex to get the usage of
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to consider in the calculations
+ """
+ addr = 2 * BYTES_PER_WORD
+ for proj in incoming_projections:
+ addr = self.__add_matrix_size(addr, proj, vertex_slice)
+ return addr
+
+ def __add_matrix_size(self, addr, projection, vertex_slice):
+ """ Add to the address the size of the matrices for the projection to
+ the vertex slice
+
+ :param int addr: The address to start from
+ :param ~spynnaker.pyNN.models.Projection: The projection to add
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice projected to
+ :rtype: int
+ """
+ synapse_info = projection._synapse_information
+ app_edge = projection._projection_edge
+
+ max_row_info = self.get_max_row_info(
+ synapse_info, vertex_slice, app_edge)
+
+ vertex = app_edge.pre_vertex
+ n_sub_atoms = int(min(vertex.get_max_atoms_per_core(), vertex.n_atoms))
+ n_sub_edges = int(math.ceil(vertex.n_atoms / n_sub_atoms))
+
+ if max_row_info.undelayed_max_n_synapses > 0:
+ size = n_sub_atoms * max_row_info.undelayed_max_bytes
+ for _ in range(n_sub_edges):
+ addr = MasterPopTableAsBinarySearch.get_next_allowed_address(
+ addr)
+ addr += size
+ if max_row_info.delayed_max_n_synapses > 0:
+ size = (n_sub_atoms * max_row_info.delayed_max_bytes *
+ app_edge.n_delay_stages)
+ for _ in range(n_sub_edges):
+ addr = MasterPopTableAsBinarySearch.get_next_allowed_address(
+ addr)
+ addr += size
+ return addr
+
+ def get_max_row_info(self, synapse_info, vertex_slice, app_edge):
+ """ Get maximum row length data
+
+ :param SynapseInformation synapse_info: Information about synapses
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice projected to
+ :param ProjectionApplicationEdge app_edge: The edge of the projection
+ """
+ key = (app_edge, synapse_info, vertex_slice)
+ if key in self.__max_row_info:
+ return self.__max_row_info[key]
+ max_row_info = get_max_row_info(
+ synapse_info, vertex_slice, app_edge.n_delay_stages, app_edge)
+ self.__max_row_info[key] = max_row_info
+ return max_row_info
+
+ def get_synapse_expander_size(self, incoming_projections):
+ """ Get the size of the synapse expander region in bytes
+
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to consider in the calculations
+ :rtype: int
+ """
+ size = 0
+ for proj in incoming_projections:
+ synapse_info = proj._synapse_information
+ app_edge = proj._projection_edge
+ n_sub_edges = len(
+ app_edge.pre_vertex.splitter.get_out_going_slices()[0])
+ if not n_sub_edges:
+ vertex = app_edge.pre_vertex
+ max_atoms = float(min(vertex.get_max_atoms_per_core(),
+ vertex.n_atoms))
+ n_sub_edges = int(math.ceil(vertex.n_atoms / max_atoms))
+ size += self.__generator_info_size(synapse_info) * n_sub_edges
+
+ # If anything generates data, also add some base information
+ if size:
+ size += SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES
+ size += (self.__neuron_impl.get_n_synapse_types() *
+ DataType.U3232.size)
+ return size
+
+ @staticmethod
+ def __generator_info_size(synapse_info):
+ """ The number of bytes required by the generator information
+
+ :param SynapseInformation synapse_info: The synapse information to use
+
+ :rtype: int
+ """
+ if not synapse_info.may_generate_on_machine():
+ return 0
+
+ connector = synapse_info.connector
+ dynamics = synapse_info.synapse_dynamics
+ gen_size = sum((
+ GeneratorData.BASE_SIZE,
+ connector.gen_delay_params_size_in_bytes(synapse_info.delays),
+ connector.gen_weight_params_size_in_bytes(synapse_info.weights),
+ connector.gen_connector_params_size_in_bytes,
+ dynamics.gen_matrix_params_size_in_bytes
+ ))
+ return gen_size
+
+ @property
+ def synapse_executable_suffix(self):
+ """ The suffix of the executable name due to the type of synapses \
+ in use.
+
+ :rtype: str
+ """
+ if self.__synapse_dynamics is None:
+ return ""
+ return self.__synapse_dynamics.get_vertex_executable_suffix()
+
+ @property
+ def neuron_recordables(self):
+ """ Get the names of variables that can be recorded by the neuron
+
+ :rtype: list(str)
+ """
+ return self.__neuron_recorder.get_recordable_variables()
+
+ @property
+ def synapse_recordables(self):
+ """ Get the names of variables that can be recorded by the synapses
+
+ :rtype: list(str)
+ """
+ return self.__synapse_recorder.get_recordable_variables()
+
+ def get_common_constant_sdram(
+ self, n_record, n_provenance, common_regions):
+ """ Get the amount of SDRAM used by common parts
+
+ :param int n_record: The number of recording regions
+ :param int n_provenance: The number of provenance items
+ :param CommonRegions common_regions: Region IDs
+ :rtype: int
+ """
+ sdram = MultiRegionSDRAM()
+ sdram.add_cost(common_regions.system, SYSTEM_BYTES_REQUIREMENT)
+ sdram.add_cost(
+ common_regions.recording,
+ get_recording_header_size(n_record) +
+ get_recording_data_constant_size(n_record))
+ sdram.add_cost(
+ common_regions.provenance,
+ ProvidesProvenanceDataFromMachineImpl.get_provenance_data_size(
+ n_provenance))
+ sdram.add_cost(
+ common_regions.profile,
+ get_profile_region_size(self.__n_profile_samples))
+ return sdram
+
+ def get_neuron_variable_sdram(self, vertex_slice):
+ """ Get the amount of SDRAM per timestep used by neuron parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ return self.__neuron_recorder.get_variable_sdram_usage(vertex_slice)
+
+ def get_synapse_variable_sdram(self, vertex_slice):
+
+ """ Get the amount of SDRAM per timestep used by synapse parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ if isinstance(self.__synapse_dynamics,
+ AbstractSynapseDynamicsStructural):
+ self.__synapse_recorder.set_max_rewires_per_ts(
+ self.__synapse_dynamics.get_max_rewires_per_ts())
+ return self.__synapse_recorder.get_variable_sdram_usage(vertex_slice)
+
+ def get_neuron_constant_sdram(self, vertex_slice, neuron_regions):
+
+ """ Get the amount of fixed SDRAM used by neuron parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+ :param NeuronRegions neuron_regions: Region IDs
+ :rtype: int
+ """
+ sdram = MultiRegionSDRAM()
+ sdram.add_cost(
+ neuron_regions.neuron_params,
+ self.get_sdram_usage_for_neuron_params(vertex_slice))
+ sdram.add_cost(
+ neuron_regions.neuron_recording,
+ self.__neuron_recorder.get_metadata_sdram_usage_in_bytes(
+ vertex_slice))
+ return sdram
+
+ def get_common_dtcm(self):
+ """ Get the amount of DTCM used by common parts
+
+ :rtype: int
+ """
+ # TODO: Get some real numbers here
+ return 0
+
+ def get_neuron_dtcm(self, vertex_slice):
+ """ Get the amount of DTCM used by neuron parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ return (
+ self.__neuron_impl.get_dtcm_usage_in_bytes(vertex_slice.n_atoms) +
+ self.__neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice)
+ )
+
+ def get_synapse_dtcm(self, vertex_slice):
+ """ Get the amount of DTCM used by synapse parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ return self.__synapse_recorder.get_dtcm_usage_in_bytes(vertex_slice)
+
+ def get_common_cpu(self):
+ """ Get the amount of CPU used by common parts
+
+ :rtype: int
+ """
+ return self._C_MAIN_BASE_N_CPU_CYCLES
+
+ def get_neuron_cpu(self, vertex_slice):
+ """ Get the amount of CPU used by neuron parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ return (
+ self._NEURON_BASE_N_CPU_CYCLES +
+ (self._NEURON_BASE_N_CPU_CYCLES_PER_NEURON *
+ vertex_slice.n_atoms) +
+ self.__neuron_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
+ self.__neuron_impl.get_n_cpu_cycles(vertex_slice.n_atoms))
+
+ def get_synapse_cpu(self, vertex_slice):
+ """ Get the amount of CPU used by synapse parts
+
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of neurons to get the size of
+
+ :rtype: int
+ """
+ return (
+ self._SYNAPSE_BASE_N_CPU_CYCLES +
+ (self._SYNAPSE_BASE_N_CPU_CYCLES_PER_NEURON *
+ vertex_slice.n_atoms) +
+ self.__synapse_recorder.get_n_cpu_cycles(vertex_slice.n_atoms))
+
+ @property
+ def incoming_projections(self):
+ """ The projections that target this population vertex
+
+ :rtype: list(~spynnaker.pyNN.models.projection.Projection)
+ """
+ return self.__incoming_projections
diff --git a/spynnaker/pyNN/models/neuron/generator_data.py b/spynnaker/pyNN/models/neuron/generator_data.py
index 1935200fad..5e14b34555 100644
--- a/spynnaker/pyNN/models/neuron/generator_data.py
+++ b/spynnaker/pyNN/models/neuron/generator_data.py
@@ -15,10 +15,9 @@
import numpy
from data_specification.enums.data_type import DataType
-from spinn_front_end_common.utilities.constants import (
- MICRO_TO_MILLISECOND_CONVERSION, BYTES_PER_WORD)
+from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
from spinn_front_end_common.utilities.globals_variables import (
- machine_time_step)
+ machine_time_step_per_ms)
# Address to indicate that the synaptic region is unused
SYN_REGION_UNUSED = 0xFFFFFFFF
@@ -108,15 +107,13 @@ def gen_data(self):
self.__pre_vertex_slice.n_atoms,
self.__max_stage,
self.__max_delay_per_stage,
- DataType.S1615.encode_as_int(
- MICRO_TO_MILLISECOND_CONVERSION /
- machine_time_step()),
+ DataType.S1615.encode_as_int(machine_time_step_per_ms()),
self.__synapse_information.synapse_type,
synapse_dynamics.gen_matrix_id,
connector.gen_connector_id,
connector.gen_weights_id(self.__synapse_information.weights),
connector.gen_delays_id(self.__synapse_information.delays)],
- dtype="uint32"))
+ dtype=numpy.uint32))
items.append(synapse_dynamics.gen_matrix_params)
items.append(connector.gen_connector_params(
self.__pre_slices, self.__post_slices, self.__pre_vertex_slice,
@@ -128,4 +125,4 @@ def gen_data(self):
items.append(connector.gen_delay_params(
self.__synapse_information.delays, self.__pre_vertex_slice,
self.__post_vertex_slice))
- return numpy.concatenate(items)
+ return items
diff --git a/spynnaker/pyNN/models/neuron/implementations/ranged_dict_vertex_slice.py b/spynnaker/pyNN/models/neuron/implementations/ranged_dict_vertex_slice.py
index d701bbdeae..5345ee243c 100644
--- a/spynnaker/pyNN/models/neuron/implementations/ranged_dict_vertex_slice.py
+++ b/spynnaker/pyNN/models/neuron/implementations/ranged_dict_vertex_slice.py
@@ -33,7 +33,7 @@ def __init__(self, ranged_dict, vertex_slice):
self.__vertex_slice = vertex_slice
def __getitem__(self, key):
- if not isinstance(key, "str"):
+ if not isinstance(key, str):
raise KeyError("Key must be a string")
return _RangedListVertexSlice(
self.__ranged_dict[key], self.__vertex_slice)
diff --git a/spynnaker/pyNN/models/neuron/master_pop_table.py b/spynnaker/pyNN/models/neuron/master_pop_table.py
index 73363ff289..62dfc84730 100644
--- a/spynnaker/pyNN/models/neuron/master_pop_table.py
+++ b/spynnaker/pyNN/models/neuron/master_pop_table.py
@@ -16,11 +16,9 @@
import numpy
import ctypes
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
-from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge
from spynnaker.pyNN.exceptions import (
SynapseRowTooBigException, SynapticConfigurationException)
-from spynnaker.pyNN.utilities.constants import (
- POPULATION_BASED_REGIONS, POP_TABLE_MAX_ROW_LENGTH)
+from spynnaker.pyNN.utilities.constants import POP_TABLE_MAX_ROW_LENGTH
from spynnaker.pyNN.utilities.bit_field_utilities import BIT_IN_A_WORD
# Scale factor for an address; allows more addresses to be represented, but
@@ -148,6 +146,9 @@ class _AddressListEntryCType(ctypes.Union):
# Base size - 2 words for size of table and address list
_BASE_SIZE_BYTES = 8
+# Number of times to multiply for delays
+_DELAY_SCALE = 2
+
# A ctypes pointer to a uint32
_UINT32_PTR = ctypes.POINTER(ctypes.c_uint32)
@@ -243,11 +244,35 @@ def routing_key(self):
@property
def mask(self):
"""
- :return: the mask of the key for this master pop entry
+ :return: the mask of the key for this entry
:rtype: int
"""
return self.__mask
+ @property
+ def core_mask(self):
+ """
+ :return: the mask of the key once shifted to get the source core ID
+ :rtype: int
+ """
+ return self.__core_mask
+
+ @property
+ def core_shift(self):
+ """
+ :return: the shift of the key to get the source core ID
+ :rtype: int
+ """
+ return self.__core_shift
+
+ @property
+ def n_neurons(self):
+ """
+ :return: the number of neurons per source core
+ :rtype: int
+ """
+ return self.__n_neurons
+
@property
def addresses_and_row_lengths(self):
"""
@@ -310,57 +335,51 @@ class MasterPopTableAsBinarySearch(object):
"__entries",
"__n_addresses"]
- MAX_ROW_LENGTH_ERROR_MSG = (
- "Only rows of up to {} entries are allowed".format(
- POP_TABLE_MAX_ROW_LENGTH))
-
- OUT_OF_RANGE_ERROR_MESSAGE = (
- "Address {} is out of range for this population table!")
-
- # Over-scale of estimate for safety
- UPPER_BOUND_FUDGE = 2
-
def __init__(self):
self.__entries = None
self.__n_addresses = 0
- def get_master_population_table_size(self, in_edges):
+ @staticmethod
+ def get_master_population_table_size(incoming_projections):
""" Get the size of the master population table in SDRAM.
- :param iterable(~pacman.model.graphs.application.ApplicationEdge) \
- in_edges:
- The edges arriving at the vertex that are to be handled by this
- table
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections arriving at the vertex that are to be handled by
+ this table
:return: the size the master pop table will take in SDRAM (in bytes)
:rtype: int
"""
+ # There will be an address list entry for each incoming projection
+ n_entries = len(incoming_projections)
- # Entry for each edge - but don't know the edges yet, so
- # assume multiple entries for each edge
+ # Count the pre-machine-vertices
n_vertices = 0
- n_entries = 0
- for in_edge in in_edges:
- if isinstance(in_edge, ProjectionApplicationEdge):
- slices, is_exact = (
- in_edge.pre_vertex.splitter.get_out_going_slices())
- if is_exact:
- n_vertices += len(slices)
- n_entries += len(in_edge.synapse_information)
- else:
- n_vertices += len(slices) * self.UPPER_BOUND_FUDGE
- n_entries += (
- len(in_edge.synapse_information) *
- self.UPPER_BOUND_FUDGE)
-
- # Multiply by 2 to get an upper bound
+ seen_edges = set()
+ for proj in incoming_projections:
+ in_edge = proj._projection_edge
+
+ # If we haven't seen this edge before, add it in
+ if in_edge not in seen_edges:
+ seen_edges.add(in_edge)
+ vertex = in_edge.pre_vertex
+ n_cores = len(vertex.splitter.get_out_going_slices()[0])
+
+ # If there are also delays, double it
+ if in_edge.n_delay_stages:
+ n_cores *= _DELAY_SCALE
+
+ n_vertices += n_cores
+
return (
_BASE_SIZE_BYTES +
(n_vertices * _MASTER_POP_ENTRY_SIZE_BYTES) +
(n_vertices * _EXTRA_INFO_ENTRY_SIZE_BYTES) +
(n_entries * _ADDRESS_LIST_ENTRY_SIZE_BYTES))
- def get_allowed_row_length(self, row_length):
- """
+ @staticmethod
+ def get_allowed_row_length(row_length):
+ """ Get the next allowed row length
+
:param int row_length: the row length being considered
:return: the row length available
:rtype: int
@@ -369,10 +388,13 @@ def get_allowed_row_length(self, row_length):
if row_length > POP_TABLE_MAX_ROW_LENGTH:
raise SynapseRowTooBigException(
- POP_TABLE_MAX_ROW_LENGTH, self.MAX_ROW_LENGTH_ERROR_MSG)
+ POP_TABLE_MAX_ROW_LENGTH,
+ "Only rows of up to {} entries are allowed".format(
+ POP_TABLE_MAX_ROW_LENGTH))
return row_length
- def get_next_allowed_address(self, next_address):
+ @staticmethod
+ def get_next_allowed_address(next_address):
""" Get the next allowed address.
:param int next_address: The next address that would be used
@@ -384,7 +406,7 @@ def get_next_allowed_address(self, next_address):
addr_scaled = (next_address + (_ADDRESS_SCALE - 1)) // _ADDRESS_SCALE
if addr_scaled > _MAX_ADDRESS:
raise SynapticConfigurationException(
- self.OUT_OF_RANGE_ERROR_MESSAGE.format(
+ "Address {} is out of range for this population table!".format(
hex(addr_scaled * _ADDRESS_SCALE)))
return addr_scaled * _ADDRESS_SCALE
@@ -470,20 +492,6 @@ def __update_master_population_table(
:raises ~spynnaker.pyNN.exceptions.SynapticConfigurationException:
If a bad address is used.
"""
-
- # pylint: disable=too-many-arguments, arguments-differ
- if key_and_mask.key not in self.__entries:
- if self.__n_addresses > _MAX_ADDRESS_START:
- raise SynapticConfigurationException(
- "The table already contains {} entries;"
- " adding another is too many".format(self.__n_addresses))
- self.__entries[key_and_mask.key] = _MasterPopEntry(
- key_and_mask.key, key_and_mask.mask, core_mask, core_shift,
- n_neurons)
- # Need to add an extra "address" for the extra_info if needed
- if core_mask != 0:
- self.__n_addresses += 1
-
# if not single, scale the address
start_addr = block_start_addr
if not is_single:
@@ -497,13 +505,64 @@ def __update_master_population_table(
"Address {} is too big for this table".format(
block_start_addr))
row_length = self.get_allowed_row_length(row_length)
- index = self.__entries[key_and_mask.key].append(
- start_addr, row_length - 1, is_single)
+
+ entry = self.__add_entry(
+ key_and_mask, core_mask, core_shift, n_neurons)
+ index = entry.append(start_addr, row_length - 1, is_single)
self.__n_addresses += 1
return index
- def add_invalid_entry(
+ def add_invalid_machine_entry(self, key_and_mask):
+ """ Add an entry to the table from a machine vertex that doesn't point
+ to anywhere. Used to keep indices in synchronisation between e.g.
+ normal and delay entries and between entries on different cores.
+
+ :param ~pacman.model.routing_info.BaseKeyAndMask key_and_mask:
+ a key_and_mask object used as part of describing
+ an edge that will require being received to be stored in the
+ master pop table; the whole edge will become multiple calls to
+ this function
+ :return: The index of the added entry
+ :rtype: int
+ """
+ return self.__add_invalid_entry(key_and_mask, 0, 0, 0)
+
+ def add_invalid_application_entry(
self, key_and_mask, core_mask=0, core_shift=0, n_neurons=0):
+ """ Add an entry to the table from an application vertex that doesn't
+ point to anywhere. Used to keep indices in synchronisation between
+ e.g. normal and delay entries and between entries on different
+ cores.
+
+ :param ~pacman.model.routing_info.BaseKeyAndMask key_and_mask:
+ a key_and_mask object used as part of describing
+ an edge that will require being received to be stored in the
+ master pop table; the whole edge will become multiple calls to
+ this function
+ :param int core_mask:
+ Mask for the part of the key that identifies the core
+ :param int core_shift: The shift of the mask to get to the core_mask
+ :param int n_neurons:
+ The number of neurons in each machine vertex (bar the last)
+ :return: The index of the added entry
+ :rtype: int
+ """
+ # If there are too many neurons per core, fail
+ if n_neurons > _MAX_N_NEURONS:
+ raise SynapticConfigurationException(
+ "The parameter n_neurons of {} is too big (maximum {})".format(
+ n_neurons, _MAX_N_NEURONS))
+
+ # If the core mask is too big, fail
+ if core_mask > _MAX_CORE_MASK:
+ raise SynapticConfigurationException(
+ "The core mask of {} is too big (maximum {})".format(
+ core_mask, _MAX_CORE_MASK))
+ return self.__add_invalid_entry(
+ key_and_mask, core_mask, core_shift, n_neurons)
+
+ def __add_invalid_entry(
+ self, key_and_mask, core_mask, core_shift, n_neurons):
""" Add an entry to the table that doesn't point to anywhere. Used
to keep indices in synchronisation between e.g. normal and delay
entries and between entries on different cores.
@@ -521,24 +580,51 @@ def add_invalid_entry(
:return: The index of the added entry
:rtype: int
"""
+ entry = self.__add_entry(
+ key_and_mask, core_mask, core_shift, n_neurons)
+ index = entry.append_invalid()
+ self.__n_addresses += 1
+ return index
+
+ def __add_entry(self, key_and_mask, core_mask, core_shift, n_neurons):
+ if self.__n_addresses >= _MAX_ADDRESS_START:
+ raise SynapticConfigurationException(
+ "The table already contains {} entries;"
+ " adding another is too many".format(self.__n_addresses))
if key_and_mask.key not in self.__entries:
- self.__entries[key_and_mask.key] = _MasterPopEntry(
+ entry = _MasterPopEntry(
key_and_mask.key, key_and_mask.mask, core_mask, core_shift,
n_neurons)
+ self.__entries[key_and_mask.key] = entry
# Need to add an extra "address" for the extra_info if needed
if core_mask != 0:
self.__n_addresses += 1
- index = self.__entries[key_and_mask.key].append_invalid()
- self.__n_addresses += 1
- return index
-
- def finish_master_pop_table(self, spec, master_pop_table_region):
+ return entry
+ entry = self.__entries[key_and_mask.key]
+ if (key_and_mask.mask != entry.mask or
+ core_mask != entry.core_mask or
+ core_shift != entry.core_shift or
+ n_neurons != entry.n_neurons):
+ raise SynapticConfigurationException(
+ "Existing entry for key {} doesn't match one being added:"
+ " Existing mask: {} core_mask: {} core_shift: {}"
+ " n_neurons: {}"
+ " Adding mask: {} core_mask: {} core_shift: {}"
+ " n_neurons: {}".format(
+ key_and_mask.key, entry.mask, entry.core_mask,
+ entry.core_shift, entry.n_neurons, key_and_mask.mask,
+ core_mask, core_shift, n_neurons))
+ return entry
+
+ def finish_master_pop_table(self, spec, region, ref):
""" Complete the master pop table in the data specification.
:param ~data_specification.DataSpecificationGenerator spec:
the data specification to write the master pop entry to
- :param int master_pop_table_region:
+ :param int region:
the region to which the master pop table is being stored
+ :param ref:
+ the reference to use for the region, or None if not referenceable
"""
# sort entries by key
entries = sorted(
@@ -552,9 +638,9 @@ def finish_master_pop_table(self, spec, master_pop_table_region):
n_entries * _MASTER_POP_ENTRY_SIZE_BYTES +
self.__n_addresses * _ADDRESS_LIST_ENTRY_SIZE_BYTES)
spec.reserve_memory_region(
- region=POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
- size=master_pop_table_sz, label='PopTable')
- spec.switch_write_focus(region=master_pop_table_region)
+ region=region, size=master_pop_table_sz, label='PopTable',
+ reference=ref)
+ spec.switch_write_focus(region=region)
# write no master pop entries and the address list size
spec.write_value(n_entries)
diff --git a/spynnaker/pyNN/models/neuron/population_machine_common.py b/spynnaker/pyNN/models/neuron/population_machine_common.py
new file mode 100644
index 0000000000..92ad44cecb
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_machine_common.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2017-2020The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from collections import namedtuple
+from spinn_utilities.overrides import overrides
+from pacman.model.graphs.machine import MachineVertex
+
+from spinn_front_end_common.interface.provenance import (
+ ProvidesProvenanceDataFromMachineImpl)
+from spinn_front_end_common.interface.buffer_management.buffer_models import (
+ AbstractReceiveBuffersToHost)
+from spinn_front_end_common.utilities.helpful_functions import (
+ locate_memory_region_for_placement)
+from spinn_front_end_common.interface.profiling.profile_utils import (
+ get_profiling_data, reserve_profile_region, write_profile_region_data)
+from spinn_front_end_common.abstract_models import AbstractHasAssociatedBinary
+from spinn_front_end_common.interface.buffer_management\
+ .recording_utilities import (
+ get_recording_header_size, get_recording_header_array)
+from spinn_front_end_common.interface.simulation.simulation_utilities import (
+ get_simulation_header_array)
+
+from spinn_front_end_common.utilities.utility_objs import ExecutableType
+from spinn_front_end_common.interface.profiling import AbstractHasProfileData
+from spinn_front_end_common.utilities.constants import SIMULATION_N_BYTES
+
+
+# Identifiers for common regions
+CommonRegions = namedtuple(
+ "CommonRegions",
+ ["system", "provenance", "profile", "recording"])
+
+
+class PopulationMachineCommon(
+ MachineVertex,
+ ProvidesProvenanceDataFromMachineImpl,
+ AbstractReceiveBuffersToHost,
+ AbstractHasProfileData,
+ AbstractHasAssociatedBinary):
+ """ A common machine vertex for all population binaries
+ """
+
+ __slots__ = [
+ # Resources used by the machine vertex
+ "__resources",
+ # Regions to be used
+ "__regions",
+ # The total number of provenance items returned by this core
+ "__n_provenance_items",
+ # The profile tags to be decoded
+ "__profile_tags",
+ # The name of the binary to run on the core
+ "__binary_file_name"
+ ]
+
+ def __init__(
+ self, label, constraints, app_vertex, vertex_slice, resources,
+ regions, n_provenance_items, profile_tags, binary_file_name):
+ """
+ :param str label: The label of the vertex
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the population that this implements
+ :param ~pacman.model.resources.ResourceContainer resources:
+ The resources used by the vertex
+ :param .CommonRegions regions: The regions to be assigned
+ :param int n_provenance_items:
+ The number of additional provenance items to be read
+ :param dict(int-->str) profile_tags:
+ A mapping of profile identifiers to names
+ :param str binary_file_name: The name of the binary file
+ """
+ super(PopulationMachineCommon, self).__init__(
+ label, constraints, app_vertex, vertex_slice)
+ self.__resources = resources
+ self.__regions = regions
+ self.__n_provenance_items = n_provenance_items
+ self.__profile_tags = profile_tags
+ self.__binary_file_name = binary_file_name
+
+ @property
+ @overrides(MachineVertex.resources_required)
+ def resources_required(self):
+ return self.__resources
+
+ @property
+ @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id)
+ def _provenance_region_id(self):
+ return self.__regions.provenance
+
+ @property
+ @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items)
+ def _n_additional_data_items(self):
+ return self.__n_provenance_items
+
+ @overrides(AbstractReceiveBuffersToHost.get_recording_region_base_address)
+ def get_recording_region_base_address(self, txrx, placement):
+ return locate_memory_region_for_placement(
+ placement, self.__regions.recording, txrx)
+
+ @overrides(AbstractHasProfileData.get_profile_data)
+ def get_profile_data(self, transceiver, placement):
+ return get_profiling_data(
+ self.__regions.profile, self.__profile_tags, transceiver,
+ placement)
+
+ @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
+ def get_binary_start_type(self):
+ return ExecutableType.USES_SIMULATION_INTERFACE
+
+ def _write_common_data_spec(self, spec, rec_regions):
+ """ Write the data specification for the common regions
+
+ :param ~data_specification.DataSpecificationGenerator spec:
+ The data specification to write to
+ :param list(int) rec_regions:
+ A list of sizes of each recording region (including empty ones)
+ """
+ # Write the setup region
+ spec.reserve_memory_region(
+ region=self.__regions.system, size=SIMULATION_N_BYTES,
+ label='System')
+ spec.switch_write_focus(self.__regions.system)
+ spec.write_array(get_simulation_header_array(self.__binary_file_name))
+
+ # Reserve memory for provenance
+ self.reserve_provenance_data_region(spec)
+
+ # Write profile data
+ reserve_profile_region(
+ spec, self.__regions.profile, self._app_vertex.n_profile_samples)
+ write_profile_region_data(
+ spec, self.__regions.profile, self._app_vertex.n_profile_samples)
+
+ # Set up for recording
+ spec.reserve_memory_region(
+ region=self.__regions.recording,
+ size=get_recording_header_size(len(rec_regions)),
+ label="Recording")
+ spec.switch_write_focus(self.__regions.recording)
+ spec.write_array(get_recording_header_array(rec_regions))
+
+ @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
+ def get_binary_file_name(self):
+ return self.__binary_file_name
diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py
new file mode 100644
index 0000000000..57f2ed2f44
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2017-2020The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import ctypes
+from collections import namedtuple
+
+from spinn_utilities.abstract_base import abstractproperty, abstractmethod
+from spinn_utilities.overrides import overrides
+
+from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
+from spinn_front_end_common.utilities import helpful_functions
+from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
+from spynnaker.pyNN.models.abstract_models import (
+ AbstractReadParametersBeforeSet)
+from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID
+from spynnaker.pyNN.utilities.utility_calls import get_n_bits
+
+
+class NeuronProvenance(ctypes.LittleEndianStructure):
+ """ Provenance items from neuron processing
+ """
+ _fields_ = [
+ # The timer tick at the end of simulation
+ ("current_timer_tick", ctypes.c_uint32),
+ # The number of misses of TDMA time slots
+ ("n_tdma_misses", ctypes.c_uint32),
+ # The earliest send time within any time step
+ ("earliest_send", ctypes.c_uint32),
+ # The latest send time within any time step
+ ("latest_send", ctypes.c_uint32)
+ ]
+
+ N_ITEMS = len(_fields_)
+
+
+# Identifiers for neuron regions
+NeuronRegions = namedtuple(
+ "NeuronRegions",
+ ["neuron_params", "neuron_recording"])
+
+
+class PopulationMachineNeurons(
+ AbstractReadParametersBeforeSet, allow_derivation=True):
+ """ Mix-in for machine vertices that have neurons in them
+ """
+
+ # This MUST stay empty to allow mixing with other things with slots
+ __slots__ = []
+
+ @abstractproperty
+ def _app_vertex(self):
+ """ The application vertex of the machine vertex.
+
+ :note: This is likely to be available via the MachineVertex.
+
+ :rtype: AbstractPopulationVertex
+ """
+
+ @abstractproperty
+ def _vertex_slice(self):
+ """ The slice of the application vertex atoms on this machine vertex.
+
+ :note: This is likely to be available via the MachineVertex.
+
+ :rtype: ~pacman.model.graphs.common.Slice
+ """
+
+ @abstractproperty
+ def _slice_index(self):
+ """ The index of the slice of this vertex in the list of slices
+
+ :rtype: int
+ """
+
+ @abstractproperty
+ def _key(self):
+ """ The key for spikes.
+
+ :rtype: int
+ """
+
+ @abstractmethod
+ def _set_key(self, key):
+ """ Set the key for spikes.
+
+ :note: This is required because this class cannot have any storage.
+
+ :param int key: The key to be set
+ """
+
+ @abstractproperty
+ def _neuron_regions(self):
+ """ The region identifiers for the neuron regions
+
+ :rtype: .NeuronRegions
+ """
+
+ def _parse_neuron_provenance(self, label, names, provenance_data):
+ """ Extract and yield neuron provenance
+
+ :param str label: The label of the node
+ :param list(str) names: The hierarchy of names for the provenance data
+ :param list(int) provenance_data: A list of data items to interpret
+ :return: a list of provenance data items
+ :rtype: iterator of ProvenanceDataItem
+ """
+ neuron_prov = NeuronProvenance(*provenance_data)
+
+ yield ProvenanceDataItem(
+ names + ["Last_timer_tic_the_core_ran_to"],
+ neuron_prov.current_timer_tick)
+ yield self._app_vertex.get_tdma_provenance_item(
+ names, label, neuron_prov.n_tdma_misses)
+ yield ProvenanceDataItem(
+ names + ["Earliest_send_time"], neuron_prov.earliest_send)
+ yield ProvenanceDataItem(
+ names + ["Latest_Send_time"], neuron_prov.latest_send)
+
+ return NeuronProvenance.N_ITEMS
+
+ def _write_neuron_data_spec(self, spec, routing_info, ring_buffer_shifts):
+ """ Write the data specification of the neuron data
+
+ :param ~data_specification.DataSpecificationGenerator spec:
+ The data specification to write to
+ :param ~pacman.model.routing_info.RoutingInfo routing_info:
+ The routing information to read the key from
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ """
+ # Get and store the key
+ self._set_key(routing_info.get_first_key_from_pre_vertex(
+ self, SPIKE_PARTITION_ID))
+
+ # Write the neuron parameters
+ self._write_neuron_parameters(spec, ring_buffer_shifts)
+
+ # Write the neuron recording region
+ neuron_recorder = self._app_vertex.neuron_recorder
+ spec.reserve_memory_region(
+ region=self._neuron_regions.neuron_recording,
+ size=neuron_recorder.get_metadata_sdram_usage_in_bytes(
+ self._vertex_slice),
+ label="neuron recording")
+ neuron_recorder.write_neuron_recording_region(
+ spec, self._neuron_regions.neuron_recording, self._vertex_slice)
+
+ def _write_neuron_parameters(self, spec, ring_buffer_shifts):
+ """ Write the neuron parameters region
+
+ :param ~data_specification.DataSpecificationGenerator spec:
+ The data specification to write to
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ """
+ self._app_vertex.set_has_run()
+
+ # pylint: disable=too-many-arguments
+ n_atoms = self._vertex_slice.n_atoms
+ spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
+ n_atoms))
+
+ # Reserve and switch to the memory region
+ params_size = self._app_vertex.get_sdram_usage_for_neuron_params(
+ self._vertex_slice)
+ spec.reserve_memory_region(
+ region=self._neuron_regions.neuron_params, size=params_size,
+ label='NeuronParams')
+ spec.switch_write_focus(self._neuron_regions.neuron_params)
+
+ # store the tdma data here for this slice.
+ data = self._app_vertex.generate_tdma_data_specification_data(
+ self._slice_index)
+ spec.write_array(data)
+
+ # Write whether the key is to be used, and then the key, or 0 if it
+ # isn't to be used
+ if self._key is None:
+ spec.write_value(data=0)
+ spec.write_value(data=0)
+ else:
+ spec.write_value(data=1)
+ spec.write_value(data=self._key)
+
+ # Write the number of neurons in the block:
+ spec.write_value(data=n_atoms)
+ spec.write_value(data=2**get_n_bits(n_atoms))
+
+ # Write the ring buffer data
+ n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
+ spec.write_value(n_synapse_types)
+ spec.write_array(ring_buffer_shifts)
+
+ # Write the neuron parameters
+ neuron_data = self._app_vertex.neuron_impl.get_data(
+ self._app_vertex.parameters, self._app_vertex.state_variables,
+ self._vertex_slice)
+ spec.write_array(neuron_data)
+
+ @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
+ def read_parameters_from_machine(
+ self, transceiver, placement, vertex_slice):
+
+ # locate SDRAM address to where the neuron parameters are stored
+ neuron_region_sdram_address = \
+ helpful_functions.locate_memory_region_for_placement(
+ placement, self._neuron_regions.neuron_params,
+ transceiver)
+
+ # shift past the extra stuff before neuron parameters that we don't
+ # need to read
+ neurons_pre_size = (
+ self._app_vertex.tdma_sdram_size_in_bytes +
+ self._app_vertex.BYTES_TILL_START_OF_GLOBAL_PARAMETERS +
+ (self._app_vertex.neuron_impl.get_n_synapse_types() *
+ BYTES_PER_WORD))
+ neuron_parameters_sdram_address = (
+ neuron_region_sdram_address + neurons_pre_size)
+
+ # get size of neuron params
+ size_of_region = self._app_vertex.get_sdram_usage_for_neuron_params(
+ vertex_slice) - neurons_pre_size
+
+ # get data from the machine
+ byte_array = transceiver.read_memory(
+ placement.x, placement.y, neuron_parameters_sdram_address,
+ size_of_region)
+
+ # update python neuron parameters with the data
+ self._app_vertex.neuron_impl.read_data(
+ byte_array, 0, vertex_slice, self._app_vertex.parameters,
+ self._app_vertex.state_variables)
diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses.py b/spynnaker/pyNN/models/neuron/population_machine_synapses.py
new file mode 100644
index 0000000000..9f36dd5fe0
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_machine_synapses.py
@@ -0,0 +1,299 @@
+# Copyright (c) 2017-2020The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from collections import namedtuple
+
+from spinn_utilities.overrides import overrides
+from spinn_utilities.abstract_base import abstractproperty
+
+from spinn_front_end_common.utilities.helpful_functions import (
+ locate_memory_region_for_placement)
+from spinn_front_end_common.abstract_models import (
+ AbstractSupportsBitFieldGeneration,
+ AbstractSupportsBitFieldRoutingCompression)
+
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ AbstractSynapseDynamicsStructural)
+from spynnaker.pyNN.utilities.utility_calls import get_n_bits
+from spynnaker.pyNN.utilities import bit_field_utilities
+from spynnaker.pyNN.models.abstract_models import (
+ AbstractSynapseExpandable, HasSynapses)
+
+from .synaptic_matrices import SynapticMatrices
+from .population_machine_synapses_provenance import (
+ PopulationMachineSynapsesProvenance)
+
+# Identifiers for synapse regions
+SYNAPSE_FIELDS = [
+ "synapse_params", "direct_matrix", "pop_table", "synaptic_matrix",
+ "synapse_dynamics", "structural_dynamics", "bitfield_builder",
+ "bitfield_key_map", "bitfield_filter", "connection_builder"]
+SynapseRegions = namedtuple(
+ "SynapseRegions", SYNAPSE_FIELDS)
+
+SynapseReferences = namedtuple(
+ "SynapseReferences",
+ ["direct_matrix_ref", "pop_table_ref", "synaptic_matrix_ref",
+ "bitfield_filter_ref"])
+
+
+class PopulationMachineSynapses(
+ PopulationMachineSynapsesProvenance,
+ AbstractSupportsBitFieldGeneration,
+ AbstractSupportsBitFieldRoutingCompression,
+ AbstractSynapseExpandable,
+ HasSynapses, allow_derivation=True):
+ """ Mix-in for machine vertices that contain synapses
+ """
+
+ # This MUST stay empty to allow mixing with other things with slots
+ __slots__ = []
+
+ @abstractproperty
+ def _app_vertex(self):
+ """ The application vertex of the machine vertex.
+
+ :note: This is likely to be available via the MachineVertex.
+
+ :rtype: AbstractPopulationVertex
+ """
+
+ @abstractproperty
+ def _vertex_slice(self):
+ """ The slice of the application vertex atoms on this machine vertex.
+
+ :note: This is likely to be available via the MachineVertex.
+
+ :rtype: ~pacman.model.graphs.common.Slice
+ """
+
+ @abstractproperty
+ def _synaptic_matrices(self):
+ """ The object holding synaptic matrices.
+
+ :note: This can be created by calling the _create_synaptic_matrices
+ method defined below.
+
+ :rtype: SynapticMatrices
+ """
+
+ @abstractproperty
+ def _synapse_regions(self):
+ """ The identifiers of synaptic regions
+
+ :rtype: .SynapseRegions
+ """
+
+ @property
+ def _synapse_references(self):
+ """ The references to synapse regions. Override to provide these.
+
+ :rtype: .SynapseRegions
+ """
+ return SynapseRegions(*[None for _ in range(len(SYNAPSE_FIELDS))])
+
+ def _create_synaptic_matrices(self, allow_direct=True):
+ """ Creates the synaptic matrices object.
+
+ :note: This is required because this object cannot have any storage
+
+ :rtype: SynapticMatrices
+ """
+ return SynapticMatrices(
+ self._vertex_slice,
+ self._app_vertex.neuron_impl.get_n_synapse_types(),
+ self._app_vertex.all_single_syn_size if allow_direct else 0,
+ self._synapse_regions.synaptic_matrix,
+ self._synapse_regions.direct_matrix,
+ self._synapse_regions.pop_table,
+ self._synapse_regions.connection_builder,
+ self._synapse_references.synaptic_matrix,
+ self._synapse_references.direct_matrix,
+ self._synapse_references.pop_table,
+ self._synapse_references.connection_builder)
+
+ @overrides(AbstractSupportsBitFieldGeneration.bit_field_base_address)
+ def bit_field_base_address(self, transceiver, placement):
+ return locate_memory_region_for_placement(
+ placement=placement, transceiver=transceiver,
+ region=self._synapse_regions.bitfield_filter)
+
+ @overrides(AbstractSupportsBitFieldRoutingCompression.
+ key_to_atom_map_region_base_address)
+ def key_to_atom_map_region_base_address(self, transceiver, placement):
+ return locate_memory_region_for_placement(
+ placement=placement, transceiver=transceiver,
+ region=self._synapse_regions.bitfield_key_map)
+
+ @overrides(AbstractSupportsBitFieldGeneration.bit_field_builder_region)
+ def bit_field_builder_region(self, transceiver, placement):
+ return locate_memory_region_for_placement(
+ placement=placement, transceiver=transceiver,
+ region=self._synapse_regions.bitfield_builder)
+
+ @overrides(AbstractSupportsBitFieldRoutingCompression.
+ regeneratable_sdram_blocks_and_sizes)
+ def regeneratable_sdram_blocks_and_sizes(self, transceiver, placement):
+ synaptic_matrix_base_address = locate_memory_region_for_placement(
+ placement=placement, transceiver=transceiver,
+ region=self._synapse_regions.synaptic_matrix)
+ return [(
+ self._synaptic_matrices.host_generated_block_addr +
+ synaptic_matrix_base_address,
+ self._synaptic_matrices.on_chip_generated_matrix_size)]
+
+ def _write_synapse_data_spec(
+ self, spec, routing_info, ring_buffer_shifts, weight_scales,
+ all_syn_block_sz, structural_sz):
+ """ Write the data specification for the synapse data
+
+ :param ~data_specification.DataSpecificationGenerator spec:
+ The data specification to write to
+ :param ~pacman.model.routing_info.RoutingInfo routing_info:
+ The routing information to read the key from
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ :param list(int) weight_scales:
+ The scaling to apply to weights to store them in the synapses
+ :param int all_syn_block_sz: The maximum size of the synapses in bytes
+ :param int structural_sz: The size of the structural data
+ """
+ # Get incoming projections
+ incoming = self._app_vertex.incoming_projections
+
+ # Write the synapse parameters
+ self._write_synapse_parameters(spec, ring_buffer_shifts)
+
+ # Write the synaptic matrices
+ self._synaptic_matrices.write_synaptic_data(
+ spec, incoming, all_syn_block_sz, weight_scales, routing_info)
+
+ # Write any synapse dynamics
+ synapse_dynamics = self._app_vertex.synapse_dynamics
+ synapse_dynamics_sz = self._app_vertex.get_synapse_dynamics_size(
+ self._vertex_slice)
+ if synapse_dynamics_sz > 0:
+ spec.reserve_memory_region(
+ region=self._synapse_regions.synapse_dynamics,
+ size=synapse_dynamics_sz, label='synapseDynamicsParams',
+ reference=self._synapse_references.synapse_dynamics)
+ synapse_dynamics.write_parameters(
+ spec, self._synapse_regions.synapse_dynamics, weight_scales)
+ elif self._synapse_references.synapse_dynamics is not None:
+ # If there is a reference for this region, we have to create it!
+ spec.reserve_memory_region(
+ region=self._synapse_regions.synapse_dynamics,
+ size=4, label='synapseDynamicsParams',
+ reference=self._synapse_references.synapse_dynamics)
+ if isinstance(synapse_dynamics, AbstractSynapseDynamicsStructural):
+ spec.reserve_memory_region(
+ region=self._synapse_regions.structural_dynamics,
+ size=structural_sz, label='synapseDynamicsStructuralParams',
+ reference=self._synapse_references.structural_dynamics)
+ synapse_dynamics.write_structural_parameters(
+ spec, self._synapse_regions.structural_dynamics,
+ weight_scales, self._app_vertex, self._vertex_slice,
+ routing_info, self._synaptic_matrices)
+ elif self._synapse_references.structural_dynamics is not None:
+ # If there is a reference for this region, we have to create it!
+ spec.reserve_memory_region(
+ region=self._synapse_regions.structural_dynamics,
+ size=4, label='synapseDynamicsStructuralParams',
+ reference=self._synapse_references.structural_dynamics)
+
+ # write up the bitfield builder data
+ # reserve bit field region
+ bit_field_utilities.reserve_bit_field_regions(
+ spec, incoming, self._synapse_regions.bitfield_builder,
+ self._synapse_regions.bitfield_filter,
+ self._synapse_regions.bitfield_key_map,
+ self._synapse_references.bitfield_builder,
+ self._synapse_references.bitfield_filter,
+ self._synapse_references.bitfield_key_map)
+ bit_field_utilities.write_bitfield_init_data(
+ spec, incoming, self._vertex_slice, routing_info,
+ self._synapse_regions.bitfield_builder,
+ self._synapse_regions.pop_table,
+ self._synapse_regions.synaptic_matrix,
+ self._synapse_regions.direct_matrix,
+ self._synapse_regions.bitfield_filter,
+ self._synapse_regions.bitfield_key_map,
+ self._synapse_regions.structural_dynamics,
+ isinstance(synapse_dynamics, AbstractSynapseDynamicsStructural))
+
+ def _write_synapse_parameters(self, spec, ring_buffer_shifts):
+ """ Write the synapse parameters data region
+
+ :param ~data_specification.DataSpecificationGenerator spec:
+ The data specification to write to
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ """
+ # Reserve space
+ spec.reserve_memory_region(
+ region=self._synapse_regions.synapse_params,
+ size=self._app_vertex.get_synapse_params_size(),
+ label='SynapseParams',
+ reference=self._synapse_references.synapse_params)
+
+ # Get values
+ n_neurons = self._vertex_slice.n_atoms
+ n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
+ max_delay = self._app_vertex.splitter.max_support_delay()
+
+ # Write synapse parameters
+ spec.switch_write_focus(self._synapse_regions.synapse_params)
+ spec.write_value(n_neurons)
+ spec.write_value(n_synapse_types)
+ spec.write_value(get_n_bits(n_neurons))
+ spec.write_value(get_n_bits(n_synapse_types))
+ spec.write_value(get_n_bits(max_delay))
+ spec.write_value(int(self._app_vertex.drop_late_spikes))
+ spec.write_value(self._app_vertex.incoming_spike_buffer_size)
+ spec.write_array(ring_buffer_shifts)
+
+ @overrides(AbstractSynapseExpandable.gen_on_machine)
+ def gen_on_machine(self):
+ return self._synaptic_matrices.gen_on_machine
+
+ @overrides(AbstractSynapseExpandable.read_generated_connection_holders)
+ def read_generated_connection_holders(self, transceiver, placement):
+ self._synaptic_matrices.read_generated_connection_holders(
+ transceiver, placement)
+
+ @property
+ @overrides(AbstractSynapseExpandable.connection_generator_region)
+ def connection_generator_region(self):
+ return self._synapse_regions.connection_builder
+
+ def get_connections_from_machine(
+ self, transceiver, placement, app_edge, synapse_info):
+ """ Get the connections from the machine for this vertex.
+
+ :param ~spinnman.transceiver.Transceiver transceiver:
+ How to read the connection data
+ :param ~pacman.model.placement.Placement placements:
+ Where the connection data is on the machine
+ :param ProjectionApplicationEdge app_edge:
+ The edge for which the data is being read
+ :param SynapseInformation synapse_info:
+ The specific projection within the edge
+ """
+ return self._synaptic_matrices.get_connections_from_machine(
+ transceiver, placement, app_edge, synapse_info)
+
+ def clear_connection_cache(self):
+ """ Flush the cache of connection information; needed for a second run
+ """
+ self._synaptic_matrices.clear_connection_cache()
diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses_provenance.py b/spynnaker/pyNN/models/neuron/population_machine_synapses_provenance.py
new file mode 100644
index 0000000000..3507362daf
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_machine_synapses_provenance.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2017-2020The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import ctypes
+from spinn_utilities.abstract_base import abstractproperty
+from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
+
+
+class SynapseProvenance(ctypes.LittleEndianStructure):
+ """ Provenance items from synapse processing
+ """
+ _fields_ = [
+ # A count of presynaptic events.
+ ("n_pre_synaptic_events", ctypes.c_uint32),
+ # A count of synaptic saturations.
+ ("n_saturations", ctypes.c_uint32),
+ # The number of STDP weight saturations.
+ ("n_plastic_saturations", ctypes.c_uint32),
+ # The number of searches of the population table that hit nothing
+ ("n_ghost_searches", ctypes.c_uint32),
+ # The number of bitfields that couldn't fit in DTCM
+ ("n_failed_bitfield_reads", ctypes.c_uint32),
+ # The number of population table hits on INVALID entries
+ ("n_invalid_pop_table_hits", ctypes.c_uint32),
+ # The number of spikes that didn't transfer empty rows
+ ("n_filtered_by_bitfield", ctypes.c_uint32)
+ ]
+
+ N_ITEMS = len(_fields_)
+
+
+class PopulationMachineSynapsesProvenance(object):
+ """ Mix-in to add synapse provenance gathering without other synapse things
+ """
+
+ # This MUST stay empty to allow mixing with other things with slots
+ __slots__ = []
+
+ TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events"
+ SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated"
+ SATURATED_PLASTIC_WEIGHTS_NAME = (
+ "Times_plastic_synaptic_weights_have_saturated")
+ GHOST_SEARCHES = "Number of failed pop table searches"
+ BIT_FIELDS_NOT_READ = "N bit fields not able to be read into DTCM"
+ INVALID_MASTER_POP_HITS = "Invalid Master Pop hits"
+ BIT_FIELD_FILTERED_PACKETS = \
+ "How many packets were filtered by the bitfield filterer."
+
+ @abstractproperty
+ def _app_vertex(self):
+ """ The application vertex of the machine vertex.
+
+ :note: This is likely to be available via the MachineVertex.
+
+ :rtype: AbstractPopulationVertex
+ """
+
+ def _parse_synapse_provenance(self, label, names, provenance_data):
+ """ Extract and yield synapse provenance
+
+ :param str label: The label of the node
+ :param list(str) names: The hierarchy of names for the provenance data
+ :param list(int) provenance_data: A list of data items to interpret
+ :return: a list of provenance data items
+ :rtype: iterator of ProvenanceDataItem
+ """
+ synapse_prov = SynapseProvenance(*provenance_data)
+
+ yield ProvenanceDataItem(
+ names + [self.TOTAL_PRE_SYNAPTIC_EVENT_NAME],
+ synapse_prov.n_pre_synaptic_events)
+ yield ProvenanceDataItem(
+ names + [self.SATURATION_COUNT_NAME],
+ synapse_prov.n_saturations, synapse_prov.n_saturations > 0,
+ f"The weights from the synapses for {label} saturated "
+ f"{synapse_prov.n_saturations} times. If this causes issues you "
+ "can increase the spikes_per_second and / or ring_buffer_sigma "
+ "values located within the .spynnaker.cfg file.")
+ yield ProvenanceDataItem(
+ names + [self.SATURATED_PLASTIC_WEIGHTS_NAME],
+ synapse_prov.n_plastic_saturations,
+ synapse_prov.n_plastic_saturations > 0,
+ f"The weights from the plastic synapses for {label} saturated "
+ f"{synapse_prov.n_plastic_saturations} times. If this causes "
+ "issues increase the spikes_per_second and / or ring_buffer_sigma"
+ " values located within the .spynnaker.cfg file.")
+ yield ProvenanceDataItem(
+ names + [self.GHOST_SEARCHES], synapse_prov.n_ghost_searches,
+ synapse_prov.n_ghost_searches > 0,
+ f"The number of failed population table searches for {label} was "
+ f"{synapse_prov.n_ghost_searches}. If this number is large "
+ "relative to the predicted incoming spike rate, try increasing "
+ " source and target neurons per core")
+ yield ProvenanceDataItem(
+ names + [self.BIT_FIELDS_NOT_READ],
+ synapse_prov.n_failed_bitfield_reads, False,
+ f"On {label}, the filter for stopping redundant DMAs couldn't be "
+ f"fully filled in; it failed to read "
+ f"{synapse_prov.n_failed_bitfield_reads} entries. "
+ "Try reducing neurons per core.")
+ yield ProvenanceDataItem(
+ names + [self.INVALID_MASTER_POP_HITS],
+ synapse_prov.n_invalid_pop_table_hits,
+ synapse_prov.n_invalid_pop_table_hits > 0,
+ f"On {label}, there were {synapse_prov.n_invalid_pop_table_hits} "
+ "keys received that had no master pop entry for them. This is an "
+ "error, which most likely stems from bad routing.")
+ yield ProvenanceDataItem(
+ names + [self.BIT_FIELD_FILTERED_PACKETS],
+ synapse_prov.n_filtered_by_bitfield)
diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py
index e4aeb22302..c0a9b8081f 100644
--- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py
+++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py
@@ -12,88 +12,129 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from enum import Enum
+import os
+import ctypes
-from enum import IntEnum
-from spinn_utilities.overrides import overrides
from pacman.executor.injection_decorator import inject_items
-from pacman.model.graphs.machine import MachineVertex
-from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
-from spinn_front_end_common.interface.provenance import (
- ProvidesProvenanceDataFromMachineImpl)
-from spinn_front_end_common.interface.buffer_management.buffer_models import (
- AbstractReceiveBuffersToHost)
-from spinn_front_end_common.utilities.helpful_functions import (
- locate_memory_region_for_placement)
+from spinn_utilities.overrides import overrides
from spinn_front_end_common.abstract_models import (
- AbstractHasAssociatedBinary, AbstractSupportsBitFieldGeneration,
- AbstractSupportsBitFieldRoutingCompression,
AbstractGeneratesDataSpecification, AbstractRewritesDataSpecification)
-from spinn_front_end_common.interface.profiling import (
- AbstractHasProfileData, profile_utils)
-from spinn_front_end_common.interface.profiling.profile_utils import (
- get_profiling_data)
-from spinn_front_end_common.utilities.utility_objs import ExecutableType
-from spinn_front_end_common.utilities import (
- constants as common_constants, helpful_functions)
-from spinn_front_end_common.interface.simulation import simulation_utilities
-from spynnaker.pyNN.models.neuron.synapse_dynamics import (
- AbstractSynapseDynamicsStructural)
-from spynnaker.pyNN.utilities import constants, bit_field_utilities
-from spynnaker.pyNN.models.abstract_models import (
- AbstractSynapseExpandable, AbstractReadParametersBeforeSet)
-from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS
+from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
+from .population_machine_common import CommonRegions, PopulationMachineCommon
+from .population_machine_neurons import (
+ NeuronRegions, PopulationMachineNeurons, NeuronProvenance)
+from .population_machine_synapses import (
+ SynapseRegions, PopulationMachineSynapses)
+from .population_machine_synapses_provenance import SynapseProvenance
+
+
+class SpikeProcessingProvenance(ctypes.LittleEndianStructure):
+ _fields_ = [
+ # A count of the times that the synaptic input circular buffers
+ # overflowed
+ ("n_buffer_overflows", ctypes.c_uint32),
+ # The number of DMA transfers done
+ ("n_dmas_complete", ctypes.c_uint32),
+ # The number of spikes successfully processed
+ ("n_spikes_processed", ctypes.c_uint32),
+ # The number of rewirings performed.
+ ("n_rewires", ctypes.c_uint32),
+ # The number of packets that were dropped due to being late
+ ("n_late_packets", ctypes.c_uint32),
+ # The maximum size of the spike input buffer during simulation
+ ("max_size_input_buffer", ctypes.c_uint32)
+ ]
+
+ N_ITEMS = len(_fields_)
+
+
+class MainProvenance(ctypes.LittleEndianStructure):
+ """ Provenance items from synapse processing
+ """
+ _fields_ = [
+ # the maximum number of background tasks queued
+ ("max_background_queued", ctypes.c_uint32),
+ # the number of times the background queue overloaded
+ ("n_background_overloads", ctypes.c_uint32)
+ ]
+
+ N_ITEMS = len(_fields_)
class PopulationMachineVertex(
- MachineVertex, AbstractReceiveBuffersToHost,
- AbstractHasAssociatedBinary, ProvidesProvenanceDataFromMachineImpl,
- AbstractHasProfileData, AbstractSupportsBitFieldGeneration,
- AbstractSupportsBitFieldRoutingCompression,
- AbstractGeneratesDataSpecification, AbstractSynapseExpandable,
- AbstractRewritesDataSpecification, AbstractReadParametersBeforeSet):
+ PopulationMachineCommon,
+ PopulationMachineNeurons,
+ PopulationMachineSynapses,
+ AbstractGeneratesDataSpecification,
+ AbstractRewritesDataSpecification):
+ """ A machine vertex for PyNN Populations
+ """
__slots__ = [
- "__binary_file_name",
- "__recorded_region_ids",
- "__resources",
- "__on_chip_generatable_offset",
- "__on_chip_generatable_size",
- "__drop_late_spikes",
- "__change_requires_neuron_parameters_reload"]
-
- class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum):
- """ Entries for the provenance data generated by standard neuron \
- models.
- """
- #: The number of pre-synaptic events
- PRE_SYNAPTIC_EVENT_COUNT = 0
- #: The number of times the synapse arithmetic saturated
- SATURATION_COUNT = 1
- #: The number of times there was a buffer overflow
- BUFFER_OVERFLOW_COUNT = 2
- #: The current timer tick
- CURRENT_TIMER_TIC = 3
- #: The number of times the plastic synapses saturated during weight
- #: calculation
- PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT = 4
- GHOST_POP_TABLE_SEARCHES = 5
- FAILED_TO_READ_BIT_FIELDS = 6
- DMA_COMPLETES = 7
- SPIKE_PROGRESSING_COUNT = 8
- INVALID_MASTER_POP_HITS = 9
- BIT_FIELD_FILTERED_COUNT = 10
- N_REWIRES = 11
- #: The number of packets that were dropped as they arrived too late
- #: to be processed
- N_LATE_SPIKES = 12
- #: The max filled size of the input buffer
- INPUT_BUFFER_FILLED_SIZE = 13
- #: The number of TDMA misses
- TDMA_MISSES = 14
- # the maxmimum number of background tasks queued
- MAX_BACKGROUND_QUEUED = 15
- # the number of times the background queue overloaded
- N_BACKGROUND_OVERLOADS = 16
+ "__change_requires_neuron_parameters_reload",
+ "__synaptic_matrices",
+ "__key",
+ "__ring_buffer_shifts",
+ "__weight_scales",
+ "__all_syn_block_sz",
+ "__structural_sz",
+ "__slice_index"]
+
+ INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets"
+ DMA_COMPLETE = "DMA's that were completed"
+ SPIKES_PROCESSED = "how many spikes were processed"
+ N_REWIRES_NAME = "Number_of_rewires"
+ N_LATE_SPIKES_NAME = "Number_of_late_spikes"
+ MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer"
+ BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded"
+ BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued"
+
+ class REGIONS(Enum):
+ """Regions for populations."""
+ SYSTEM = 0
+ NEURON_PARAMS = 1
+ SYNAPSE_PARAMS = 2
+ POPULATION_TABLE = 3
+ SYNAPTIC_MATRIX = 4
+ SYNAPSE_DYNAMICS = 5
+ STRUCTURAL_DYNAMICS = 6
+ NEURON_RECORDING = 7
+ PROVENANCE_DATA = 8
+ PROFILING = 9
+ CONNECTOR_BUILDER = 10
+ DIRECT_MATRIX = 11
+ BIT_FIELD_FILTER = 12
+ BIT_FIELD_BUILDER = 13
+ BIT_FIELD_KEY_MAP = 14
+ RECORDING = 15
+
+ # Regions for this vertex used by common parts
+ COMMON_REGIONS = CommonRegions(
+ system=REGIONS.SYSTEM.value,
+ provenance=REGIONS.PROVENANCE_DATA.value,
+ profile=REGIONS.PROFILING.value,
+ recording=REGIONS.RECORDING.value)
+
+ # Regions for this vertex used by neuron parts
+ NEURON_REGIONS = NeuronRegions(
+ neuron_params=REGIONS.NEURON_PARAMS.value,
+ neuron_recording=REGIONS.NEURON_RECORDING.value
+ )
+
+ # Regions for this vertex used by synapse parts
+ SYNAPSE_REGIONS = SynapseRegions(
+ synapse_params=REGIONS.SYNAPSE_PARAMS.value,
+ direct_matrix=REGIONS.DIRECT_MATRIX.value,
+ pop_table=REGIONS.POPULATION_TABLE.value,
+ synaptic_matrix=REGIONS.SYNAPTIC_MATRIX.value,
+ synapse_dynamics=REGIONS.SYNAPSE_DYNAMICS.value,
+ structural_dynamics=REGIONS.STRUCTURAL_DYNAMICS.value,
+ bitfield_builder=REGIONS.BIT_FIELD_BUILDER.value,
+ bitfield_key_map=REGIONS.BIT_FIELD_KEY_MAP.value,
+ bitfield_filter=REGIONS.BIT_FIELD_FILTER.value,
+ connection_builder=REGIONS.CONNECTOR_BUILDER.value
+ )
_PROFILE_TAG_LABELS = {
0: "TIMER",
@@ -102,383 +143,166 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum):
3: "PROCESS_FIXED_SYNAPSES",
4: "PROCESS_PLASTIC_SYNAPSES"}
- # x words needed for a bitfield covering 256 atoms
- _WORDS_TO_COVER_256_ATOMS = 8
-
- # provenance data items
- SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated"
- INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets"
- TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events"
- LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to"
- N_RE_WIRES_NAME = "Number_of_rewires"
- SATURATED_PLASTIC_WEIGHTS_NAME = (
- "Times_plastic_synaptic_weights_have_saturated")
- _N_LATE_SPIKES_NAME = "Number_of_late_spikes"
- _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer"
- _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded"
- _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued"
- BIT_FIELD_FILTERED_PACKETS = (
- "How many packets were filtered by the bitfield filterer.")
- INVALID_MASTER_POP_HITS = "Invalid Master Pop hits"
- SPIKES_PROCESSED = "how many spikes were processed"
- DMA_COMPLETE = "DMA's that were completed"
- BIT_FIELDS_NOT_READ = "N bit fields not able to be read into DTCM"
- GHOST_SEARCHES = "Number of failed pop table searches"
- PLASTIC_WEIGHT_SATURATION = "Times_plastic_synaptic_weights_have_saturated"
- LAST_TIMER_TICK = "Last_timer_tic_the_core_ran_to"
- TOTAL_PRE_SYNAPTIC_EVENTS = "Total_pre_synaptic_events"
- LOST_INPUT_BUFFER_PACKETS = "Times_the_input_buffer_lost_packets"
-
- N_ADDITIONAL_PROVENANCE_DATA_ITEMS = len(EXTRA_PROVENANCE_DATA_ENTRIES)
-
def __init__(
- self, resources_required, recorded_region_ids, label, constraints,
- app_vertex, vertex_slice, drop_late_spikes, binary_file_name):
+ self, resources_required, label, constraints, app_vertex,
+ vertex_slice, slice_index, ring_buffer_shifts, weight_scales,
+ all_syn_block_sz, structural_sz):
"""
:param ~pacman.model.resources.ResourceContainer resources_required:
- :param iterable(int) recorded_region_ids:
- :param str label:
- :param bool drop_late_spikes: control flag for dropping packets.
+ The resources used by the vertex
+ :param str label: The label of the vertex
:param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
:param AbstractPopulationVertex app_vertex:
The associated application vertex
:param ~pacman.model.graphs.common.Slice vertex_slice:
The slice of the population that this implements
- :param str binary_file_name: binary name to be run for this verte
+ :param int slice_index:
+ The index of the slice in the ordered list of slices
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ :param list(int) weight_scales:
+ The scaling to apply to weights to store them in the synapses
+ :param int all_syn_block_sz: The maximum size of the synapses in bytes
+ :param int structural_sz: The size of the structural data
"""
- super().__init__(label, constraints, app_vertex, vertex_slice)
- self.__binary_file_name = binary_file_name
- self.__recorded_region_ids = recorded_region_ids
- self.__resources = resources_required
- self.__drop_late_spikes = drop_late_spikes
- self.__on_chip_generatable_offset = None
- self.__on_chip_generatable_size = None
+ super(PopulationMachineVertex, self).__init__(
+ label, constraints, app_vertex, vertex_slice, resources_required,
+ self.COMMON_REGIONS,
+ NeuronProvenance.N_ITEMS + SynapseProvenance.N_ITEMS +
+ SpikeProcessingProvenance.N_ITEMS + MainProvenance.N_ITEMS,
+ self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex))
+ self.__key = None
+ self.__synaptic_matrices = self._create_synaptic_matrices()
self.__change_requires_neuron_parameters_reload = False
-
- def set_on_chip_generatable_area(self, offset, size):
- self.__on_chip_generatable_offset = offset
- self.__on_chip_generatable_size = size
-
- @overrides(AbstractSupportsBitFieldGeneration.bit_field_base_address)
- def bit_field_base_address(self, transceiver, placement):
- return locate_memory_region_for_placement(
- placement=placement, transceiver=transceiver,
- region=POPULATION_BASED_REGIONS.BIT_FIELD_FILTER.value)
-
- @overrides(AbstractSupportsBitFieldRoutingCompression.
- key_to_atom_map_region_base_address)
- def key_to_atom_map_region_base_address(self, transceiver, placement):
- return locate_memory_region_for_placement(
- placement=placement, transceiver=transceiver,
- region=POPULATION_BASED_REGIONS.BIT_FIELD_KEY_MAP.value)
-
- @overrides(AbstractSupportsBitFieldGeneration.bit_field_builder_region)
- def bit_field_builder_region(self, transceiver, placement):
- return locate_memory_region_for_placement(
- placement=placement, transceiver=transceiver,
- region=POPULATION_BASED_REGIONS.BIT_FIELD_BUILDER.value)
-
- @overrides(AbstractSupportsBitFieldRoutingCompression.
- regeneratable_sdram_blocks_and_sizes)
- def regeneratable_sdram_blocks_and_sizes(self, transceiver, placement):
- synaptic_matrix_base_address = locate_memory_region_for_placement(
- placement=placement, transceiver=transceiver,
- region=POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value)
- return [(
- self.__on_chip_generatable_offset + synaptic_matrix_base_address,
- self.__on_chip_generatable_size)]
+ self.__slice_index = slice_index
+ self.__ring_buffer_shifts = ring_buffer_shifts
+ self.__weight_scales = weight_scales
+ self.__all_syn_block_sz = all_syn_block_sz
+ self.__structural_sz = structural_sz
@property
- @overrides(MachineVertex.resources_required)
- def resources_required(self):
- return self.__resources
+ @overrides(PopulationMachineNeurons._slice_index)
+ def _slice_index(self):
+ return self.__slice_index
@property
- @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id)
- def _provenance_region_id(self):
- return POPULATION_BASED_REGIONS.PROVENANCE_DATA.value
+ @overrides(PopulationMachineNeurons._key)
+ def _key(self):
+ return self.__key
+
+ @overrides(PopulationMachineNeurons._set_key)
+ def _set_key(self, key):
+ self.__key = key
@property
- @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items)
- def _n_additional_data_items(self):
- return len(self.EXTRA_PROVENANCE_DATA_ENTRIES)
-
- @overrides(ProvidesProvenanceDataFromMachineImpl.
- get_provenance_data_from_machine)
- def get_provenance_data_from_machine(self, transceiver, placement):
- provenance_data = self._read_provenance_data(transceiver, placement)
- label, names = self._get_provenance_placement_description(placement)
-
- # This is why we have to override the superclass public method
- tic_overruns = 0
- for item in self.parse_system_provenance_items(
- label, names, provenance_data):
- yield item
- if item.names[-1] == self._TIMER_TICK_OVERRUN:
- # GOTCHA!
- tic_overruns = item.value
-
- # translate into provenance data items
- yield from self.__parse_prov_items(
- label, names, self._get_extra_provenance_words(provenance_data),
- tic_overruns)
-
- def __parse_prov_items(self, label, names, provenance_data, tic_overruns):
- # Would be parse_extra_provenance_items except for extra argument
- """
- :param str label:
- :param list(str) names:
- :param list(int) provenance_data:
- :param int tic_overruns:
- :rtype: iterable(ProvenanceDataItem)
- """
- (n_pre_synaptic_events, n_saturations, n_buffer_overflows,
- last_timer_tick, n_plastic_saturations, n_ghost_searches,
- n_bitfield_fails, dma_completes, spike_processing_count,
- invalid_master_pop_hits, n_packets_filtered, n_rewires,
- n_late_packets, input_buffer_max, tdma_misses, max_bg_queued,
- n_bg_overloads) = provenance_data
-
- # translate into provenance data items
- yield ProvenanceDataItem(
- names + [self.SATURATION_COUNT_NAME],
- n_saturations, (n_saturations > 0),
- f"The weights from the synapses for {label} saturated "
- f"{n_saturations} times. If this causes issues you can increase "
- "the spikes_per_second and / or ring_buffer_sigma values located "
- "within the .spynnaker.cfg file.")
- yield ProvenanceDataItem(
- names + [self.INPUT_BUFFER_FULL_NAME],
- n_buffer_overflows, (n_buffer_overflows > 0),
- f"The input buffer for {label} lost packets on "
- f"{n_buffer_overflows} occasions. This is often a sign that the "
- "system is running too quickly for the number of neurons per "
- "core. Please increase the timer_tic or time_scale_factor or "
- "decrease the number of neurons per core.")
- yield ProvenanceDataItem(
- names + [self.TOTAL_PRE_SYNAPTIC_EVENT_NAME],
- n_pre_synaptic_events)
- yield ProvenanceDataItem(
- names + [self.LAST_TIMER_TICK_NAME], last_timer_tick)
- yield ProvenanceDataItem(
- names + [self.SATURATED_PLASTIC_WEIGHTS_NAME],
- n_plastic_saturations, (n_plastic_saturations > 0),
- f"The weights from the plastic synapses for {label} saturated "
- f"{n_plastic_saturations} times. If this causes issue increase "
- "the spikes_per_second and / or ring_buffer_sigma values located "
- "within the .spynnaker.cfg file.")
- yield ProvenanceDataItem(
- names + [self.N_RE_WIRES_NAME], n_rewires)
- yield ProvenanceDataItem(
- names + [self.GHOST_SEARCHES], n_ghost_searches,
- (n_ghost_searches > 0),
- f"The number of failed population table searches for {label} was "
- f"{n_ghost_searches}. If this number is large relative to the "
- "predicted incoming spike rate, try increasing source and target "
- "neurons per core")
- yield ProvenanceDataItem(
- names + [self.BIT_FIELDS_NOT_READ],
- n_bitfield_fails, False,
- f"On {label}, the filter for stopping redundant DMAs couldn't be "
- f"fully filled in; it failed to read {n_bitfield_fails} entries, "
- "which means it required a max of "
- f"{n_bitfield_fails * self._WORDS_TO_COVER_256_ATOMS} "
- "extra bytes of DTCM (assuming cores have at most 255 neurons). "
- "Try reducing neurons per core, or size of buffers, or neuron "
- "params per neuron, etc.")
- yield ProvenanceDataItem(
- names + [self.DMA_COMPLETE], dma_completes)
- yield ProvenanceDataItem(
- names + [self.SPIKES_PROCESSED],
- spike_processing_count)
- yield ProvenanceDataItem(
- names + [self.INVALID_MASTER_POP_HITS],
- invalid_master_pop_hits, (invalid_master_pop_hits > 0),
- f"On {label}, there were {invalid_master_pop_hits} keys received "
- "that had no master pop entry for them. This is an error, which "
- "most likely stems from bad routing.")
- yield ProvenanceDataItem(
- names + [self.BIT_FIELD_FILTERED_PACKETS],
- n_packets_filtered, (n_packets_filtered > 0 and (
- n_buffer_overflows > 0 or tic_overruns > 0)),
- f"On {label}, there were {n_packets_filtered} packets received "
- "that were filtered by the bit-field filterer on the core. These "
- "packets were having to be stored and processed on core, which "
- "means the core may not be running as efficiently as it should. "
- "Please adjust the network or the mapping so that these packets "
- "are filtered in the router to improve performance.")
+ @overrides(PopulationMachineNeurons._neuron_regions)
+ def _neuron_regions(self):
+ return self.NEURON_REGIONS
- late_message = (
- f"On {label}, {n_late_packets} packets were dropped from the "
- "input buffer, because they arrived too late to be processed in "
- "a given time step. Try increasing the time_scale_factor located "
- "within the .spynnaker.cfg file or in the pynn.setup() method."
- if self.__drop_late_spikes else
- f"On {label}, {n_late_packets} packets arrived too late to be "
- "processed in a given time step. Try increasing the "
- "time_scale_factor located within the .spynnaker.cfg file or in "
- "the pynn.setup() method.")
- yield ProvenanceDataItem(
- names + [self._N_LATE_SPIKES_NAME],
- n_late_packets, (n_late_packets > 0), late_message)
+ @property
+ @overrides(PopulationMachineSynapses._synapse_regions)
+ def _synapse_regions(self):
+ return self.SYNAPSE_REGIONS
- yield ProvenanceDataItem(
- names + [self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME],
- input_buffer_max, report=False)
+ @property
+ @overrides(PopulationMachineSynapses._synaptic_matrices)
+ def _synaptic_matrices(self):
+ return self.__synaptic_matrices
- yield self._app_vertex.get_tdma_provenance_item(
- names, label, tdma_misses)
+ @staticmethod
+ def __get_binary_file_name(app_vertex):
+ """ Get the local binary filename for this vertex. Static because at
+ the time this is needed, the local app_vertex is not set.
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :rtype: str
+ """
+ # Split binary name into title and extension
+ name, ext = os.path.splitext(app_vertex.neuron_impl.binary_name)
+
+ # Reunite title and extension and return
+ return name + app_vertex.synapse_executable_suffix + ext
+
+ @overrides(PopulationMachineCommon.parse_extra_provenance_items)
+ def parse_extra_provenance_items(self, label, names, provenance_data):
+ syn_offset = NeuronProvenance.N_ITEMS
+ proc_offset = syn_offset + SynapseProvenance.N_ITEMS
+ end_proc_offset = proc_offset + SpikeProcessingProvenance.N_ITEMS
+ yield from self._parse_neuron_provenance(
+ label, names, provenance_data[:NeuronProvenance.N_ITEMS])
+ yield from self._parse_synapse_provenance(
+ label, names, provenance_data[syn_offset:proc_offset])
+ yield from self._parse_spike_processing_provenance(
+ label, names, provenance_data[proc_offset:end_proc_offset])
+
+ main_prov = MainProvenance(*provenance_data[-MainProvenance.N_ITEMS:])
yield ProvenanceDataItem(
- names + [self._BACKGROUND_MAX_QUEUED_NAME],
- max_bg_queued, (max_bg_queued > 1),
- f"On {label}, a maximum of {max_bg_queued} background tasks were "
- "queued, which can indicate a core overloading. Try increasing "
- "the time_scale_factor located within the .spynnaker.cfg file or "
- "in the pynn.setup() method.")
+ names + [self.BACKGROUND_MAX_QUEUED_NAME],
+ main_prov.max_background_queued,
+ main_prov.max_background_queued > 1,
+ f"A maximum of {main_prov.max_background_queued} background"
+ f" tasks were queued on {label}. Try increasing the"
+ " time_scale_factor located within the .spynnaker.cfg file or"
+ " in the pynn.setup() method.")
yield ProvenanceDataItem(
- names + [self._BACKGROUND_OVERLOADS_NAME],
- n_bg_overloads, (n_bg_overloads > 0),
- f"On {label}, the background queue overloaded {n_bg_overloads} "
- "times, which can indicate a core overloading. Try increasing "
- "the time_scale_factor located within the .spynnaker.cfg file or "
- "in the pynn.setup() method.")
-
- @overrides(AbstractReceiveBuffersToHost.get_recorded_region_ids)
+ names + [self.BACKGROUND_OVERLOADS_NAME],
+ main_prov.n_background_overloads,
+ main_prov.n_background_overloads > 0,
+ "The background queue overloaded "
+ f"{main_prov.n_background_overloads} times on {label}."
+ " Try increasing the time_scale_factor located within"
+ " the .spynnaker.cfg file or in the pynn.setup() method.")
+
+ @overrides(PopulationMachineCommon.get_recorded_region_ids)
def get_recorded_region_ids(self):
- return self.__recorded_region_ids
-
- @overrides(AbstractReceiveBuffersToHost.get_recording_region_base_address)
- def get_recording_region_base_address(self, txrx, placement):
- return locate_memory_region_for_placement(
- placement, POPULATION_BASED_REGIONS.NEURON_RECORDING.value, txrx)
-
- @overrides(AbstractHasProfileData.get_profile_data)
- def get_profile_data(self, transceiver, placement):
- return get_profiling_data(
- POPULATION_BASED_REGIONS.PROFILING.value,
- self._PROFILE_TAG_LABELS, transceiver, placement)
-
- @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
- def get_binary_file_name(self):
- return self.__binary_file_name
-
- @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
- def get_binary_start_type(self):
- return ExecutableType.USES_SIMULATION_INTERFACE
+ ids = self._app_vertex.neuron_recorder.recorded_ids_by_slice(
+ self.vertex_slice)
+ ids.extend(self._app_vertex.synapse_recorder.recorded_ids_by_slice(
+ self.vertex_slice))
+ return ids
@inject_items({
- "application_graph": "MemoryApplicationGraph",
- "machine_graph": "MemoryMachineGraph",
"routing_info": "MemoryRoutingInfos",
- "data_n_time_steps": "DataNTimeSteps",
- "n_key_map": "MemoryMachinePartitionNKeysMap"
+ "data_n_time_steps": "DataNTimeSteps"
})
@overrides(
AbstractGeneratesDataSpecification.generate_data_specification,
additional_arguments={
- "application_graph", "machine_graph", "routing_info",
- "data_n_time_steps", "n_key_map"
+ "routing_info", "data_n_time_steps"
})
def generate_data_specification(
- self, spec, placement, application_graph, machine_graph,
- routing_info, data_n_time_steps, n_key_map):
+ self, spec, placement, routing_info, data_n_time_steps):
"""
- :param application_graph: (injected)
- :param machine_graph: (injected)
:param routing_info: (injected)
:param data_n_time_steps: (injected)
- :param n_key_map: (injected)
"""
- # pylint: disable=too-many-arguments, arguments-differ
-
- spec.comment("\n*** Spec for block of {} neurons ***\n".format(
- self._app_vertex.neuron_impl.model_name))
-
- # Reserve memory regions
- self._reserve_memory_regions(spec, machine_graph, n_key_map)
-
- # Declare random number generators and distributions:
- # TODO add random distribution stuff
- # self.write_random_distribution_declarations(spec)
-
- # Get the key
- key = routing_info.get_first_key_from_pre_vertex(
- self, constants.SPIKE_PARTITION_ID)
-
- # Write the setup region
- spec.switch_write_focus(POPULATION_BASED_REGIONS.SYSTEM.value)
- spec.write_array(simulation_utilities.get_simulation_header_array(
- self.__binary_file_name))
-
- # If the dynamics are structural the neuron recorder needs to know
- # the maximum rewires that could happend per timestep
- s_dynamics = self._app_vertex.synapse_manager.synapse_dynamics
- if isinstance(s_dynamics, AbstractSynapseDynamicsStructural):
- max_rewires_per_ts = s_dynamics.get_max_rewires_per_ts()
- self._app_vertex.neuron_recorder.set_max_rewires_per_ts(
- max_rewires_per_ts)
-
- # Write the neuron recording region
- self._app_vertex.neuron_recorder.write_neuron_recording_region(
- spec, POPULATION_BASED_REGIONS.NEURON_RECORDING.value,
+ # pylint: disable=arguments-differ
+ rec_regions = self._app_vertex.neuron_recorder.get_region_sizes(
self.vertex_slice, data_n_time_steps)
+ rec_regions.extend(self._app_vertex.synapse_recorder.get_region_sizes(
+ self.vertex_slice, data_n_time_steps))
+ self._write_common_data_spec(spec, rec_regions)
+
+ self._write_neuron_data_spec(
+ spec, routing_info, self.__ring_buffer_shifts)
- # Write the neuron parameters
- self._write_neuron_parameters(
- spec, key, constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)
-
- # write profile data
- profile_utils.write_profile_region_data(
- spec, POPULATION_BASED_REGIONS.PROFILING.value,
- self._app_vertex.n_profile_samples)
-
- # Get the weight_scale value from the appropriate location
- weight_scale = self._app_vertex.neuron_impl.get_global_weight_scale()
-
- # allow the synaptic matrix to write its data spec-able data
- self._app_vertex.synapse_manager.write_data_spec(
- spec, self._app_vertex, self.vertex_slice, self, machine_graph,
- application_graph, routing_info, weight_scale)
- self.set_on_chip_generatable_area(
- self._app_vertex.synapse_manager.host_written_matrix_size(
- self.vertex_slice),
- self._app_vertex.synapse_manager.on_chip_written_matrix_size(
- self.vertex_slice))
-
- # write up the bitfield builder data
- bit_field_utilities.write_bitfield_init_data(
- spec, self, machine_graph, routing_info,
- n_key_map, POPULATION_BASED_REGIONS.BIT_FIELD_BUILDER.value,
- POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
- POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
- POPULATION_BASED_REGIONS.DIRECT_MATRIX.value,
- POPULATION_BASED_REGIONS.BIT_FIELD_FILTER.value,
- POPULATION_BASED_REGIONS.BIT_FIELD_KEY_MAP.value,
- POPULATION_BASED_REGIONS.STRUCTURAL_DYNAMICS.value,
- isinstance(
- self._app_vertex.synapse_manager.synapse_dynamics,
- AbstractSynapseDynamicsStructural))
+ self._write_synapse_data_spec(
+ spec, routing_info, self.__ring_buffer_shifts,
+ self.__weight_scales, self.__all_syn_block_sz,
+ self.__structural_sz)
# End the writing of this specification:
spec.end_specification()
- @inject_items({"routing_info": "MemoryRoutingInfos"})
@overrides(
- AbstractRewritesDataSpecification.regenerate_data_specification,
- additional_arguments={"routing_info"})
- def regenerate_data_specification(self, spec, placement, routing_info):
+ AbstractRewritesDataSpecification.regenerate_data_specification)
+ def regenerate_data_specification(self, spec, placement):
# pylint: disable=too-many-arguments, arguments-differ
- # reserve the neuron parameters data region
- self._reserve_neuron_params_data_region(spec)
-
# write the neuron params into the new DSG region
- self._write_neuron_parameters(
- key=routing_info.get_first_key_from_pre_vertex(
- self, constants.SPIKE_PARTITION_ID),
- spec=spec,
- region_id=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)
+ self._write_neuron_parameters(spec, self.__ring_buffer_shifts)
# close spec
spec.end_specification()
@@ -491,143 +315,50 @@ def reload_required(self):
def set_reload_required(self, new_value):
self.__change_requires_neuron_parameters_reload = new_value
- def _reserve_memory_regions(self, spec, machine_graph, n_key_map):
- """ Reserve the DSG data regions.
+ def _parse_spike_processing_provenance(
+ self, label, names, provenance_data):
+ """ Extract and yield spike processing provenance
- :param ~.DataSpecificationGenerator spec:
- the spec to write the DSG region to
- :param ~.MachineGraph machine_graph: machine graph
- :param n_key_map: n key map
- :return: None
+ :param str label: The label of the node
+ :param list(str) names: The hierarchy of names for the provenance data
+ :param list(int) provenance_data: A list of data items to interpret
+ :return: a list of provenance data items
+ :rtype: iterator of ProvenanceDataItem
"""
- spec.comment("\nReserving memory space for data regions:\n\n")
-
- # Reserve memory:
- spec.reserve_memory_region(
- region=POPULATION_BASED_REGIONS.SYSTEM.value,
- size=common_constants.SIMULATION_N_BYTES,
- label='System')
-
- self._reserve_neuron_params_data_region(spec)
-
- spec.reserve_memory_region(
- region=POPULATION_BASED_REGIONS.NEURON_RECORDING.value,
- size=self._app_vertex.neuron_recorder.get_exact_static_sdram_usage(
- self.vertex_slice),
- label="neuron recording")
-
- profile_utils.reserve_profile_region(
- spec, POPULATION_BASED_REGIONS.PROFILING.value,
- self._app_vertex.n_profile_samples)
-
- # reserve bit field region
- bit_field_utilities.reserve_bit_field_regions(
- spec, machine_graph, n_key_map, self,
- POPULATION_BASED_REGIONS.BIT_FIELD_BUILDER.value,
- POPULATION_BASED_REGIONS.BIT_FIELD_FILTER.value,
- POPULATION_BASED_REGIONS.BIT_FIELD_KEY_MAP.value)
-
- self.reserve_provenance_data_region(spec)
-
- @staticmethod
- def neuron_region_sdram_address(placement, transceiver):
- return helpful_functions.locate_memory_region_for_placement(
- placement, POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
- transceiver)
-
- def _reserve_neuron_params_data_region(self, spec):
- """ Reserve the neuron parameter data region.
-
- :param ~data_specification.DataSpecificationGenerator spec:
- the spec to write the DSG region to
- :return: None
- """
- params_size = self._app_vertex.get_sdram_usage_for_neuron_params(
- self.vertex_slice)
- spec.reserve_memory_region(
- region=POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
- size=params_size, label='NeuronParams')
+ prov = SpikeProcessingProvenance(*provenance_data)
- def _write_neuron_parameters(self, spec, key, region_id):
-
- self._app_vertex.set_has_run()
-
- # pylint: disable=too-many-arguments
- n_atoms = self.vertex_slice.n_atoms
- spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
- n_atoms))
-
- # Set the focus to the memory region:
- spec.switch_write_focus(region_id)
-
- # store the tdma data here for this slice.
- data = self._app_vertex.generate_tdma_data_specification_data(
- self._app_vertex.vertex_slices.index(self.vertex_slice))
- spec.write_array(data)
-
- # Write whether the key is to be used, and then the key, or 0 if it
- # isn't to be used
- if key is None:
- spec.write_value(data=0)
- spec.write_value(data=0)
- else:
- spec.write_value(data=1)
- spec.write_value(data=key)
-
- # Write the number of neurons in the block:
- spec.write_value(data=n_atoms)
-
- # Write the number of synapse types
- spec.write_value(
- data=self._app_vertex.neuron_impl.get_n_synapse_types())
-
- # Write the size of the incoming spike buffer
- spec.write_value(data=self._app_vertex.incoming_spike_buffer_size)
-
- # Write the neuron parameters
- neuron_data = self._app_vertex.neuron_impl.get_data(
- self._app_vertex.parameters, self._app_vertex.state_variables,
- self.vertex_slice)
- spec.write_array(neuron_data)
+ yield ProvenanceDataItem(
+ names + [self.INPUT_BUFFER_FULL_NAME],
+ prov.n_buffer_overflows,
+ prov.n_buffer_overflows > 0,
+ f"The input buffer for {label} lost packets on "
+ f"{prov.n_buffer_overflows} occasions. This is often a "
+ "sign that the system is running too quickly for the number of "
+ "neurons per core. Please increase the timer_tic or"
+ " time_scale_factor or decrease the number of neurons per core.")
+ yield ProvenanceDataItem(
+ names + [self.DMA_COMPLETE], prov.n_dmas_complete)
+ yield ProvenanceDataItem(
+ names + [self.SPIKES_PROCESSED],
+ prov.n_spikes_processed)
+ yield ProvenanceDataItem(
+ names + [self.N_REWIRES_NAME], prov.n_rewires)
- @overrides(AbstractSynapseExpandable.gen_on_machine)
- def gen_on_machine(self):
- return self.app_vertex.synapse_manager.gen_on_machine(
- self.vertex_slice)
+ late_message = (
+ f"On {label}, {prov.n_late_packets} packets were dropped "
+ "from the input buffer, because they arrived too late to be "
+ "processed in a given time step. Try increasing the "
+ "time_scale_factor located within the .spynnaker.cfg file or in "
+ "the pynn.setup() method."
+ if self._app_vertex.drop_late_spikes else
+ f"On {label}, {prov.n_late_packets} packets arrived too "
+ "late to be processed in a given time step. Try increasing the "
+ "time_scale_factor located within the .spynnaker.cfg file or in "
+ "the pynn.setup() method.")
+ yield ProvenanceDataItem(
+ names + [self.N_LATE_SPIKES_NAME], prov.n_late_packets,
+ prov.n_late_packets > 0, late_message)
- @overrides(AbstractSynapseExpandable.read_generated_connection_holders)
- def read_generated_connection_holders(self, transceiver, placement):
- self._app_vertex.synapse_manager.read_generated_connection_holders(
- transceiver, placement)
-
- @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
- def read_parameters_from_machine(
- self, transceiver, placement, vertex_slice):
-
- # locate SDRAM address to where the neuron parameters are stored
- neuron_region_sdram_address = self.neuron_region_sdram_address(
- placement, transceiver)
-
- # shift past the extra stuff before neuron parameters that we don't
- # need to read
- neuron_parameters_sdram_address = (
- neuron_region_sdram_address +
- self._app_vertex.tdma_sdram_size_in_bytes +
- self._app_vertex.BYTES_TILL_START_OF_GLOBAL_PARAMETERS)
-
- # get size of neuron params
- size_of_region = self._app_vertex.get_sdram_usage_for_neuron_params(
- vertex_slice)
- size_of_region -= (
- self._app_vertex.BYTES_TILL_START_OF_GLOBAL_PARAMETERS +
- self._app_vertex.tdma_sdram_size_in_bytes)
-
- # get data from the machine
- byte_array = transceiver.read_memory(
- placement.x, placement.y, neuron_parameters_sdram_address,
- size_of_region)
-
- # update python neuron parameters with the data
- self._app_vertex.neuron_impl.read_data(
- byte_array, 0, vertex_slice, self._app_vertex.parameters,
- self._app_vertex.state_variables)
+ yield ProvenanceDataItem(
+ names + [self.MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME],
+ prov.max_size_input_buffer, report=False)
diff --git a/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py
new file mode 100644
index 0000000000..3df3f0d1a0
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py
@@ -0,0 +1,286 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from enum import Enum
+import os
+import ctypes
+
+from pacman.executor.injection_decorator import inject_items
+from spinn_utilities.overrides import overrides
+from spinn_front_end_common.abstract_models import (
+ AbstractGeneratesDataSpecification, AbstractRewritesDataSpecification)
+from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
+from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
+from spynnaker.pyNN.exceptions import SynapticConfigurationException
+from spynnaker.pyNN.models.abstract_models import (
+ ReceivesSynapticInputsOverSDRAM, SendsSynapticInputsOverSDRAM)
+from spynnaker.pyNN.utilities.utility_calls import get_n_bits
+from .population_machine_common import CommonRegions, PopulationMachineCommon
+from .population_machine_neurons import (
+ NeuronRegions, PopulationMachineNeurons, NeuronProvenance)
+
+# Size of SDRAM params = 1 word for address + 1 word for size
+# + 1 word for n_neurons + 1 word for n_synapse_types
+# + 1 word for number of synapse vertices
+# + 1 word for number of neuron bits needed
+SDRAM_PARAMS_SIZE = 6 * BYTES_PER_WORD
+
+
+class NeuronMainProvenance(ctypes.LittleEndianStructure):
+ """ Provenance items from synapse processing
+ """
+ _fields_ = [
+ # the maximum number of times the timer tick didn't complete in time
+ ("n_timer_overruns", ctypes.c_uint32),
+ ]
+
+ N_ITEMS = len(_fields_)
+
+
+class PopulationNeuronsMachineVertex(
+ PopulationMachineCommon,
+ PopulationMachineNeurons,
+ AbstractGeneratesDataSpecification,
+ AbstractRewritesDataSpecification,
+ ReceivesSynapticInputsOverSDRAM):
+ """ A machine vertex for the Neurons of PyNN Populations
+ """
+
+ __slots__ = [
+ "__change_requires_neuron_parameters_reload",
+ "__key",
+ "__sdram_partition",
+ "__ring_buffer_shifts",
+ "__weight_scales",
+ "__slice_index"]
+
+ class REGIONS(Enum):
+ """Regions for populations."""
+ SYSTEM = 0
+ PROVENANCE_DATA = 1
+ PROFILING = 2
+ RECORDING = 3
+ NEURON_PARAMS = 4
+ NEURON_RECORDING = 5
+ SDRAM_EDGE_PARAMS = 6
+
+ # Regions for this vertex used by common parts
+ COMMON_REGIONS = CommonRegions(
+ system=REGIONS.SYSTEM.value,
+ provenance=REGIONS.PROVENANCE_DATA.value,
+ profile=REGIONS.PROFILING.value,
+ recording=REGIONS.RECORDING.value)
+
+ # Regions for this vertex used by neuron parts
+ NEURON_REGIONS = NeuronRegions(
+ neuron_params=REGIONS.NEURON_PARAMS.value,
+ neuron_recording=REGIONS.NEURON_RECORDING.value
+ )
+
+ _PROFILE_TAG_LABELS = {
+ 0: "TIMER_NEURONS"}
+
+ def __init__(
+ self, resources_required, label, constraints, app_vertex,
+ vertex_slice, slice_index, ring_buffer_shifts, weight_scales):
+ """
+ :param ~pacman.model.resources.ResourceContainer resources_required:
+ The resources used by the vertex
+ :param str label: The label of the vertex
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the population that this implements
+ :param int slice_index:
+ The index of the slice in the ordered list of slices
+ :param list(int) ring_buffer_shifts:
+ The shifts to apply to convert ring buffer values to S1615 values
+ :param list(int) weight_scales:
+ The scaling to apply to weights to store them in the synapses
+ """
+ super(PopulationNeuronsMachineVertex, self).__init__(
+ label, constraints, app_vertex, vertex_slice, resources_required,
+ self.COMMON_REGIONS,
+ NeuronProvenance.N_ITEMS + NeuronMainProvenance.N_ITEMS,
+ self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex))
+ self.__key = None
+ self.__change_requires_neuron_parameters_reload = False
+ self.__sdram_partition = None
+ self.__slice_index = slice_index
+ self.__ring_buffer_shifts = ring_buffer_shifts
+ self.__weight_scales = weight_scales
+
+ @property
+ @overrides(PopulationMachineNeurons._slice_index)
+ def _slice_index(self):
+ return self.__slice_index
+
+ @property
+ @overrides(PopulationMachineNeurons._key)
+ def _key(self):
+ return self.__key
+
+ @overrides(PopulationMachineNeurons._set_key)
+ def _set_key(self, key):
+ self.__key = key
+
+ @property
+ @overrides(PopulationMachineNeurons._neuron_regions)
+ def _neuron_regions(self):
+ return self.NEURON_REGIONS
+
+ def set_sdram_partition(self, sdram_partition):
+ """ Set the SDRAM partition. Must only be called once per instance
+
+ :param ~pacman.model.graphs.machine\
+ .SourceSegmentedSDRAMMachinePartition sdram_partition:
+ The SDRAM partition to receive synapses from
+ """
+ if self.__sdram_partition is not None:
+ raise SynapticConfigurationException(
+ "Trying to set SDRAM partition more than once")
+ self.__sdram_partition = sdram_partition
+
+ @staticmethod
+ def __get_binary_file_name(app_vertex):
+ """ Get the local binary filename for this vertex. Static because at
+ the time this is needed, the local app_vertex is not set.
+
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :rtype: str
+ """
+ # Split binary name into title and extension
+ name, ext = os.path.splitext(app_vertex.neuron_impl.binary_name)
+
+ # Reunite title and extension and return
+ return name + "_neuron" + ext
+
+ @overrides(PopulationMachineCommon.parse_extra_provenance_items)
+ def parse_extra_provenance_items(self, label, names, provenance_data):
+ yield from self._parse_neuron_provenance(
+ label, names, provenance_data[:NeuronProvenance.N_ITEMS])
+
+ neuron_prov = NeuronMainProvenance(
+ *provenance_data[-NeuronMainProvenance.N_ITEMS:])
+
+ yield ProvenanceDataItem(
+ names + ["Timer tick overruns"],
+ neuron_prov.n_timer_overruns, neuron_prov.n_timer_overruns > 0,
+ f"Vertex {label} overran on {neuron_prov.n_timer_overruns} "
+ "timesteps. This may mean that the simulation results are invalid."
+ " Try with fewer neurons per core, increasing the time"
+ " scale factor, or reducing the number of spikes sent")
+
+ @overrides(PopulationMachineCommon.get_recorded_region_ids)
+ def get_recorded_region_ids(self):
+ ids = self._app_vertex.neuron_recorder.recorded_ids_by_slice(
+ self.vertex_slice)
+ return ids
+
+ @inject_items({
+ "routing_info": "MemoryRoutingInfos",
+ "data_n_time_steps": "DataNTimeSteps",
+ })
+ @overrides(
+ AbstractGeneratesDataSpecification.generate_data_specification,
+ additional_arguments={"routing_info", "data_n_time_steps"})
+ def generate_data_specification(
+ self, spec, placement, routing_info, data_n_time_steps):
+ """
+ :param machine_graph: (injected)
+ :param routing_info: (injected)
+ :param data_n_time_steps: (injected)
+ :param n_key_map: (injected)
+ """
+ # pylint: disable=arguments-differ
+ rec_regions = self._app_vertex.neuron_recorder.get_region_sizes(
+ self.vertex_slice, data_n_time_steps)
+ self._write_common_data_spec(spec, rec_regions)
+
+ self._write_neuron_data_spec(
+ spec, routing_info, self.__ring_buffer_shifts)
+
+ # Write information about SDRAM
+ n_neurons = self._vertex_slice.n_atoms
+ n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types()
+ spec.reserve_memory_region(
+ region=self.REGIONS.SDRAM_EDGE_PARAMS.value,
+ size=SDRAM_PARAMS_SIZE, label="SDRAM Params")
+ spec.switch_write_focus(self.REGIONS.SDRAM_EDGE_PARAMS.value)
+ spec.write_value(
+ self.__sdram_partition.get_sdram_base_address_for(self))
+ spec.write_value(self.n_bytes_for_transfer)
+ spec.write_value(n_neurons)
+ spec.write_value(n_synapse_types)
+ spec.write_value(len(self.__sdram_partition.pre_vertices))
+ spec.write_value(get_n_bits(n_neurons))
+
+ # End the writing of this specification:
+ spec.end_specification()
+
+ @overrides(
+ AbstractRewritesDataSpecification.regenerate_data_specification)
+ def regenerate_data_specification(self, spec, placement):
+ # pylint: disable=too-many-arguments, arguments-differ
+
+ # write the neuron params into the new DSG region
+ self._write_neuron_parameters(spec, self.__ring_buffer_shifts)
+
+ # close spec
+ spec.end_specification()
+
+ @overrides(AbstractRewritesDataSpecification.reload_required)
+ def reload_required(self):
+ return self.__change_requires_neuron_parameters_reload
+
+ @overrides(AbstractRewritesDataSpecification.set_reload_required)
+ def set_reload_required(self, new_value):
+ self.__change_requires_neuron_parameters_reload = new_value
+
+ @property
+ @overrides(ReceivesSynapticInputsOverSDRAM.n_target_neurons)
+ def n_target_neurons(self):
+ return self._vertex_slice.n_atoms
+
+ @property
+ @overrides(ReceivesSynapticInputsOverSDRAM.n_target_synapse_types)
+ def n_target_synapse_types(self):
+ return self._app_vertex.neuron_impl.get_n_synapse_types()
+
+ @property
+ @overrides(ReceivesSynapticInputsOverSDRAM.weight_scales)
+ def weight_scales(self):
+ return self.__weight_scales
+
+ @property
+ @overrides(ReceivesSynapticInputsOverSDRAM.n_bytes_for_transfer)
+ def n_bytes_for_transfer(self):
+ n_bytes = (2 ** get_n_bits(self.n_target_neurons) *
+ self.n_target_synapse_types * self.N_BYTES_PER_INPUT)
+ # May need to add some padding if not a round number of words
+ extra_bytes = n_bytes % BYTES_PER_WORD
+ if extra_bytes:
+ n_bytes += BYTES_PER_WORD - extra_bytes
+ return n_bytes
+
+ @overrides(ReceivesSynapticInputsOverSDRAM.sdram_requirement)
+ def sdram_requirement(self, sdram_machine_edge):
+ if isinstance(sdram_machine_edge.pre_vertex,
+ SendsSynapticInputsOverSDRAM):
+ return self.n_bytes_for_transfer
+ raise SynapticConfigurationException(
+ "Unknown pre vertex type in edge {}".format(sdram_machine_edge))
diff --git a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py
new file mode 100644
index 0000000000..2627cd66b8
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py
@@ -0,0 +1,338 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from enum import Enum
+import ctypes
+
+from spinn_utilities.overrides import overrides
+from spinn_utilities.abstract_base import abstractmethod
+from spinn_utilities.config_holder import get_config_int
+from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
+from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem
+from spynnaker.pyNN.exceptions import SynapticConfigurationException
+from spynnaker.pyNN.models.abstract_models import (
+ ReceivesSynapticInputsOverSDRAM, SendsSynapticInputsOverSDRAM)
+from .population_machine_common import CommonRegions, PopulationMachineCommon
+from .population_machine_synapses import SynapseRegions
+from .population_machine_synapses_provenance import SynapseProvenance
+
+# Size of SDRAM params = 1 word for address + 1 word for size
+# + 1 word for time to send
+SDRAM_PARAMS_SIZE = 3 * BYTES_PER_WORD
+
+# Size of the Key config params = 1 work for key + 1 word for mask
+# + 1 word for spike mask + 1 word for self connection boolean
+KEY_CONFIG_SIZE = 4 * BYTES_PER_WORD
+
+
+class SpikeProcessingFastProvenance(ctypes.LittleEndianStructure):
+ _fields_ = [
+ # A count of the times that the synaptic input circular buffers
+ # overflowed
+ ("n_buffer_overflows", ctypes.c_uint32),
+ # The number of DMA transfers done
+ ("n_dmas_complete", ctypes.c_uint32),
+ # The number of spikes successfully processed
+ ("n_spikes_processed", ctypes.c_uint32),
+ # The number of rewirings performed.
+ ("n_rewires", ctypes.c_uint32),
+ # The number of packets that were dropped due to being late
+ ("n_late_packets", ctypes.c_uint32),
+ # The maximum size of the spike input buffer during simulation
+ ("max_size_input_buffer", ctypes.c_uint32),
+ # The maximum number of spikes in a time step
+ ("max_spikes_received", ctypes.c_uint32),
+ # The maximum number of spikes processed in a time step
+ ("max_spikes_processed", ctypes.c_uint32),
+ # The number of times the transfer time over ran
+ ("n_transfer_timer_overruns", ctypes.c_uint32),
+ # The number of times a time step was skipped entirely
+ ("n_skipped_time_steps", ctypes.c_uint32),
+ # The maximum overrun of a transfer
+ ("max_transfer_timer_overrun", ctypes.c_uint32)
+ ]
+
+ N_ITEMS = len(_fields_)
+
+
+class PopulationSynapsesMachineVertexCommon(
+ PopulationMachineCommon,
+ SendsSynapticInputsOverSDRAM):
+ """ Common parts of a machine vertex for the synapses of a Population
+ """
+
+ INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets"
+ DMA_COMPLETE = "DMA's that were completed"
+ SPIKES_PROCESSED = "How many spikes were processed"
+ N_REWIRES_NAME = "Number_of_rewires"
+ N_LATE_SPIKES_NAME = "Number_of_late_spikes"
+ MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer"
+ MAX_SPIKES_RECEIVED = "Max_spikes_received_in_time_step"
+ MAX_SPIKES_PROCESSED = "Max_spikes_processed_in_time_step"
+ N_TRANSFER_TIMER_OVERRUNS = "Times_the_transfer_did_not_complete_in_time"
+ N_SKIPPED_TIME_STEPS = "Times_a_time_step_was_skipped"
+ MAX_TRANSFER_TIMER_OVERRUNS = "Max_transfer_overrn"
+
+ __slots__ = [
+ "__sdram_partition",
+ "__neuron_to_synapse_edge"]
+
+ class REGIONS(Enum):
+ """Regions for populations."""
+ SYSTEM = 0
+ PROVENANCE_DATA = 1
+ PROFILING = 2
+ RECORDING = 3
+ SYNAPSE_PARAMS = 4
+ DIRECT_MATRIX = 5
+ SYNAPTIC_MATRIX = 6
+ POPULATION_TABLE = 7
+ SYNAPSE_DYNAMICS = 8
+ STRUCTURAL_DYNAMICS = 9
+ BIT_FIELD_FILTER = 10
+ SDRAM_EDGE_PARAMS = 11
+ KEY_REGION = 12
+ CONNECTOR_BUILDER = 13
+ BIT_FIELD_BUILDER = 14
+ BIT_FIELD_KEY_MAP = 15
+
+ # Regions for this vertex used by common parts
+ COMMON_REGIONS = CommonRegions(
+ system=REGIONS.SYSTEM.value,
+ provenance=REGIONS.PROVENANCE_DATA.value,
+ profile=REGIONS.PROFILING.value,
+ recording=REGIONS.RECORDING.value)
+
+ # Regions for this vertex used by synapse parts
+ SYNAPSE_REGIONS = SynapseRegions(
+ synapse_params=REGIONS.SYNAPSE_PARAMS.value,
+ direct_matrix=REGIONS.DIRECT_MATRIX.value,
+ pop_table=REGIONS.POPULATION_TABLE.value,
+ synaptic_matrix=REGIONS.SYNAPTIC_MATRIX.value,
+ synapse_dynamics=REGIONS.SYNAPSE_DYNAMICS.value,
+ structural_dynamics=REGIONS.STRUCTURAL_DYNAMICS.value,
+ bitfield_builder=REGIONS.BIT_FIELD_BUILDER.value,
+ bitfield_key_map=REGIONS.BIT_FIELD_KEY_MAP.value,
+ bitfield_filter=REGIONS.BIT_FIELD_FILTER.value,
+ connection_builder=REGIONS.CONNECTOR_BUILDER.value
+ )
+
+ _PROFILE_TAG_LABELS = {
+ 0: "TIMER_SYNAPSES",
+ 1: "DMA_READ",
+ 2: "INCOMING_SPIKE",
+ 3: "PROCESS_FIXED_SYNAPSES",
+ 4: "PROCESS_PLASTIC_SYNAPSES"}
+
+ def __init__(
+ self, resources_required, label, constraints, app_vertex,
+ vertex_slice):
+ """
+ :param ~pacman.model.resources.ResourceContainer resources_required:
+ The resources used by the vertex
+ :param str label: The label of the vertex
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the population that this implements
+ """
+ super(PopulationSynapsesMachineVertexCommon, self).__init__(
+ label, constraints, app_vertex, vertex_slice, resources_required,
+ self.COMMON_REGIONS,
+ SynapseProvenance.N_ITEMS + SpikeProcessingFastProvenance.N_ITEMS,
+ self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex))
+ self.__sdram_partition = None
+ self.__neuron_to_synapse_edge = None
+
+ def set_sdram_partition(self, sdram_partition):
+ """ Set the SDRAM partition. Must only be called once per instance
+
+ :param ~pacman.model.graphs.machine\
+ .SourceSegmentedSDRAMMachinePartition sdram_partition:
+ The SDRAM partition to receive synapses from
+ """
+ if self.__sdram_partition is not None:
+ raise SynapticConfigurationException(
+ "Trying to set SDRAM partition more than once")
+ self.__sdram_partition = sdram_partition
+
+ def set_neuron_to_synapse_edge(self, neuron_to_synapse_edge):
+ """ Set the edge that goes from the neuron core back to the synapse\
+ core.
+
+ :param ~pacman.model.graphs.machine.MachineEdge neuron_to_synapse_edge:
+ The edge that we will receive spikes from
+ """
+ self.__neuron_to_synapse_edge = neuron_to_synapse_edge
+
+ @staticmethod
+ def __get_binary_file_name(app_vertex):
+ """ Get the local binary filename for this vertex. Static because at
+ the time this is needed, the local app_vertex is not set.
+
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :rtype: str
+ """
+
+ # Reunite title and extension and return
+ return "synapses" + app_vertex.synapse_executable_suffix + ".aplx"
+
+ @overrides(PopulationMachineCommon.get_recorded_region_ids)
+ def get_recorded_region_ids(self):
+ ids = self._app_vertex.synapse_recorder.recorded_ids_by_slice(
+ self.vertex_slice)
+ return ids
+
+ def _write_sdram_edge_spec(self, spec):
+ """ Write information about SDRAM Edge
+
+ :param DataSpecificationGenerator spec:
+ The generator of the specification to write
+ """
+ send_size = self.__sdram_partition.get_sdram_size_of_region_for(self)
+ spec.reserve_memory_region(
+ region=self.REGIONS.SDRAM_EDGE_PARAMS.value,
+ size=SDRAM_PARAMS_SIZE, label="SDRAM Params")
+ spec.switch_write_focus(self.REGIONS.SDRAM_EDGE_PARAMS.value)
+ spec.write_value(
+ self.__sdram_partition.get_sdram_base_address_for(self))
+ spec.write_value(send_size)
+ spec.write_value(get_config_int(
+ "Simulation", "transfer_overhead_clocks"))
+
+ def _write_key_spec(self, spec, routing_info):
+ """ Write key config region
+
+ :param DataSpecificationGenerator spec:
+ The generator of the specification to write
+ :param RoutingInfo routing_info:
+ Container of keys and masks for edges
+ """
+ spec.reserve_memory_region(
+ region=self.REGIONS.KEY_REGION.value, size=KEY_CONFIG_SIZE,
+ label="Key Config")
+ spec.switch_write_focus(self.REGIONS.KEY_REGION.value)
+ if self.__neuron_to_synapse_edge is None:
+ # No Key = make sure it doesn't match; i.e. spike & 0x0 != 0x1
+ spec.write_value(1)
+ spec.write_value(0)
+ spec.write_value(0)
+ spec.write_value(0)
+ else:
+ r_info = routing_info.get_routing_info_for_edge(
+ self.__neuron_to_synapse_edge)
+ spec.write_value(r_info.first_key)
+ spec.write_value(r_info.first_mask)
+ spec.write_value(~r_info.first_mask & 0xFFFFFFFF)
+ spec.write_value(int(self._app_vertex.self_projection is not None))
+
+ @overrides(SendsSynapticInputsOverSDRAM.sdram_requirement)
+ def sdram_requirement(self, sdram_machine_edge):
+ if isinstance(sdram_machine_edge.post_vertex,
+ ReceivesSynapticInputsOverSDRAM):
+ return sdram_machine_edge.post_vertex.n_bytes_for_transfer
+ raise SynapticConfigurationException(
+ "Unknown post vertex type in edge {}".format(sdram_machine_edge))
+
+ @overrides(PopulationMachineCommon.parse_extra_provenance_items)
+ def parse_extra_provenance_items(self, label, names, provenance_data):
+ proc_offset = SynapseProvenance.N_ITEMS
+ yield from self._parse_synapse_provenance(
+ label, names, provenance_data[:proc_offset])
+ yield from self._parse_spike_processing_fast_provenance(
+ label, names, provenance_data[proc_offset:])
+
+ @abstractmethod
+ def _parse_synapse_provenance(self, label, names, provenance_data):
+ """ Extract and yield synapse provenance
+
+ :param str label: The label of the node
+ :param list(str) names: The hierarchy of names for the provenance data
+ :param list(int) provenance_data: A list of data items to interpret
+ :return: a list of provenance data items
+ :rtype: iterator of ProvenanceDataItem
+ """
+
+ def _parse_spike_processing_fast_provenance(
+ self, label, names, provenance_data):
+ """ Extract and yield spike processing provenance
+
+ :param str label: The label of the node
+ :param list(str) names: The hierarchy of names for the provenance data
+ :param list(int) provenance_data: A list of data items to interpret
+ :return: a list of provenance data items
+ :rtype: iterator of ProvenanceDataItem
+ """
+ prov = SpikeProcessingFastProvenance(*provenance_data)
+
+ yield ProvenanceDataItem(
+ names + [self.INPUT_BUFFER_FULL_NAME],
+ prov.n_buffer_overflows,
+ prov.n_buffer_overflows > 0,
+ f"The input buffer for {label} lost packets on "
+ f"{prov.n_buffer_overflows} occasions. This is often a "
+ "sign that the system is running too quickly for the number of "
+ "neurons per core. Please increase the timer_tic or"
+ " time_scale_factor or decrease the number of neurons per core.")
+ yield ProvenanceDataItem(
+ names + [self.DMA_COMPLETE], prov.n_dmas_complete)
+ yield ProvenanceDataItem(
+ names + [self.SPIKES_PROCESSED],
+ prov.n_spikes_processed)
+ yield ProvenanceDataItem(
+ names + [self.N_REWIRES_NAME], prov.n_rewires)
+
+ late_message = (
+ f"On {label}, {prov.n_late_packets} packets were dropped "
+ "from the input buffer, because they arrived too late to be "
+ "processed in a given time step. Try increasing the "
+ "time_scale_factor located within the .spynnaker.cfg file or in "
+ "the pynn.setup() method."
+ if self._app_vertex.drop_late_spikes else
+ f"On {label}, {prov.n_late_packets} packets arrived too "
+ "late to be processed in a given time step. Try increasing the "
+ "time_scale_factor located within the .spynnaker.cfg file or in "
+ "the pynn.setup() method.")
+ yield ProvenanceDataItem(
+ names + [self.N_LATE_SPIKES_NAME], prov.n_late_packets,
+ prov.n_late_packets > 0, late_message)
+
+ yield ProvenanceDataItem(
+ names + [self.MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME],
+ prov.max_size_input_buffer, report=False)
+ yield ProvenanceDataItem(
+ names + [self.MAX_SPIKES_RECEIVED], prov.max_spikes_received)
+ yield ProvenanceDataItem(
+ names + [self.MAX_SPIKES_PROCESSED], prov.max_spikes_processed)
+ yield ProvenanceDataItem(
+ names + [self.N_TRANSFER_TIMER_OVERRUNS],
+ prov.n_transfer_timer_overruns, prov.n_transfer_timer_overruns > 0,
+ f"On {label}, the transfer of synaptic inputs to SDRAM did not end"
+ " before the next timer tick started"
+ f" {prov.n_transfer_timer_overruns} times with a maximum overrun"
+ f" of {prov.max_transfer_timer_overrun}. Try increasing "
+ " transfer_overhead_clocks in your .spynnaker.cfg file.")
+ yield ProvenanceDataItem(
+ names + [self.N_SKIPPED_TIME_STEPS], prov.n_skipped_time_steps,
+ prov.n_skipped_time_steps > 0,
+ f"On {label}, synaptic processing did not start on"
+ f" {prov.n_skipped_time_steps} time steps. Try increasing the "
+ "time_scale_factor located within the .spynnaker.cfg file or in "
+ "the pynn.setup() method.")
+ yield ProvenanceDataItem(
+ names + [self.MAX_TRANSFER_TIMER_OVERRUNS],
+ prov.max_transfer_timer_overrun)
diff --git a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py
new file mode 100644
index 0000000000..1ec6c1cbe8
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from pacman.executor.injection_decorator import inject_items
+from spinn_utilities.overrides import overrides
+from spinn_front_end_common.abstract_models import (
+ AbstractGeneratesDataSpecification)
+from .population_machine_common import PopulationMachineCommon
+from .population_machine_synapses import PopulationMachineSynapses
+from .population_synapses_machine_vertex_common import (
+ PopulationSynapsesMachineVertexCommon)
+
+
+class PopulationSynapsesMachineVertexLead(
+ PopulationSynapsesMachineVertexCommon,
+ PopulationMachineSynapses,
+ AbstractGeneratesDataSpecification):
+ """ A synaptic machine vertex that leads other Synaptic machine vertices,
+ writing shared areas.
+ """
+
+ __slots__ = [
+ "__synaptic_matrices",
+ "__ring_buffer_shifts",
+ "__weight_scales",
+ "__all_syn_block_sz",
+ "__structural_sz",
+ "__synapse_references"]
+
+ def __init__(
+ self, resources_required, label, constraints, app_vertex,
+ vertex_slice, ring_buffer_shifts, weight_scales, all_syn_block_sz,
+ structural_sz, synapse_references):
+ """
+ :param ~pacman.model.resources.ResourceContainer resources_required:
+ The resources used by the vertex
+ :param str label: The label of the vertex
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the population that this implements
+ """
+ super(PopulationSynapsesMachineVertexLead, self).__init__(
+ resources_required, label, constraints, app_vertex, vertex_slice)
+ self.__ring_buffer_shifts = ring_buffer_shifts
+ self.__weight_scales = weight_scales
+ self.__all_syn_block_sz = all_syn_block_sz
+ self.__structural_sz = structural_sz
+ self.__synapse_references = synapse_references
+
+ # Need to do this last so that the values above can be used
+ self.__synaptic_matrices = self._create_synaptic_matrices(False)
+
+ @property
+ @overrides(PopulationMachineSynapses._synapse_regions)
+ def _synapse_regions(self):
+ return self.SYNAPSE_REGIONS
+
+ @property
+ @overrides(PopulationMachineSynapses._synaptic_matrices)
+ def _synaptic_matrices(self):
+ return self.__synaptic_matrices
+
+ @property
+ @overrides(PopulationMachineSynapses._synapse_references)
+ def _synapse_references(self):
+ return self.__synapse_references
+
+ @overrides(PopulationMachineCommon.get_recorded_region_ids)
+ def get_recorded_region_ids(self):
+ ids = self._app_vertex.synapse_recorder.recorded_ids_by_slice(
+ self.vertex_slice)
+ return ids
+
+ @inject_items({
+ "routing_info": "MemoryRoutingInfos",
+ "data_n_time_steps": "DataNTimeSteps"
+ })
+ @overrides(
+ AbstractGeneratesDataSpecification.generate_data_specification,
+ additional_arguments={"routing_info", "data_n_time_steps"})
+ def generate_data_specification(
+ self, spec, placement, routing_info, data_n_time_steps):
+ """
+ :param routing_info: (injected)
+ :param data_n_time_steps: (injected)
+ """
+ # pylint: disable=arguments-differ
+ rec_regions = self._app_vertex.synapse_recorder.get_region_sizes(
+ self.vertex_slice, data_n_time_steps)
+ self._write_common_data_spec(spec, rec_regions)
+
+ self._write_synapse_data_spec(
+ spec, routing_info, self.__ring_buffer_shifts,
+ self.__weight_scales, self.__all_syn_block_sz,
+ self.__structural_sz)
+
+ # Write information about SDRAM
+ self._write_sdram_edge_spec(spec)
+
+ # Write information about keys
+ self._write_key_spec(spec, routing_info)
+
+ # End the writing of this specification:
+ spec.end_specification()
+
+ @overrides(PopulationSynapsesMachineVertexCommon._parse_synapse_provenance)
+ def _parse_synapse_provenance(self, label, names, provenance_data):
+ return PopulationMachineSynapses._parse_synapse_provenance(
+ self, label, names, provenance_data)
diff --git a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_shared.py b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_shared.py
new file mode 100644
index 0000000000..84a0536aa6
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_shared.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from pacman.executor.injection_decorator import inject_items
+from spinn_utilities.overrides import overrides
+from spinn_front_end_common.abstract_models import (
+ AbstractGeneratesDataSpecification)
+from .population_synapses_machine_vertex_common import (
+ PopulationSynapsesMachineVertexCommon)
+from .population_machine_synapses_provenance import (
+ PopulationMachineSynapsesProvenance)
+
+
+class PopulationSynapsesMachineVertexShared(
+ PopulationSynapsesMachineVertexCommon,
+ PopulationMachineSynapsesProvenance,
+ AbstractGeneratesDataSpecification):
+ """ A machine vertex for PyNN Populations
+ """
+
+ __slots__ = [
+ "__synapse_references"
+ ]
+
+ def __init__(
+ self, resources_required, label, constraints, app_vertex,
+ vertex_slice, synapse_references):
+ """
+ :param ~pacman.model.resources.ResourceContainer resources_required:
+ The resources used by the vertex
+ :param str label: The label of the vertex
+ :param list(~pacman.model.constraints.AbstractConstraint) constraints:
+ Constraints for the vertex
+ :param AbstractPopulationVertex app_vertex:
+ The associated application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the population that this implements
+ """
+ super(PopulationSynapsesMachineVertexShared, self).__init__(
+ resources_required, label, constraints, app_vertex, vertex_slice)
+ self.__synapse_references = synapse_references
+
+ @inject_items({
+ "routing_info": "MemoryRoutingInfos",
+ "data_n_time_steps": "DataNTimeSteps",
+ })
+ @overrides(
+ AbstractGeneratesDataSpecification.generate_data_specification,
+ additional_arguments={"routing_info", "data_n_time_steps"})
+ def generate_data_specification(
+ self, spec, placement, routing_info, data_n_time_steps):
+ """
+ :param machine_graph: (injected)
+ :param routing_info: (injected)
+ :param data_n_time_steps: (injected)
+ :param n_key_map: (injected)
+ """
+ # pylint: disable=arguments-differ
+ rec_regions = self._app_vertex.synapse_recorder.get_region_sizes(
+ self.vertex_slice, data_n_time_steps)
+ self._write_common_data_spec(spec, rec_regions)
+
+ # Write references to shared regions
+ for reg, ref in zip(self.SYNAPSE_REGIONS, self.__synapse_references):
+ spec.reference_memory_region(reg, ref)
+
+ # Write information about SDRAM
+ self._write_sdram_edge_spec(spec)
+
+ # Write information about keys
+ self._write_key_spec(spec, routing_info)
+
+ # End the writing of this specification:
+ spec.end_specification()
+
+ @overrides(PopulationSynapsesMachineVertexCommon._parse_synapse_provenance)
+ def _parse_synapse_provenance(self, label, names, provenance_data):
+ return PopulationMachineSynapsesProvenance._parse_synapse_provenance(
+ self, label, names, provenance_data)
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_generate_on_machine.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_generate_on_machine.py
index 89364cdbec..5eead1eb94 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_generate_on_machine.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_generate_on_machine.py
@@ -50,7 +50,7 @@ def gen_matrix_params(self):
:rtype: ~numpy.ndarray(uint32)
"""
- return numpy.zeros(0, dtype="uint32")
+ return numpy.zeros(0, dtype=numpy.uint32)
@property
def gen_matrix_params_size_in_bytes(self):
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py
index fe054f287a..44ec81fd86 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py
@@ -21,17 +21,13 @@ class AbstractSynapseDynamicsStructural(object, metaclass=AbstractBase):
@abstractmethod
def get_structural_parameters_sdram_usage_in_bytes(
- self, graph, vertex, n_neurons):
+ self, incoming_projections, n_neurons):
""" Get the size of the structural parameters
Note: At the Application level this will be an estimate.
- :param graph: Graph at same level as vertex.
- :type graph: ~pacman.model.graphs.application.ApplicationGraph or
- ~pacman.model.graphs.machine.MachineGraph
- :param vertex: Vertex at the same level as the graph
- :type vertex: ~pacman.model.graphs.application.ApplicationVertex or
- ~pacman.model.graphs.machine.MachineVertex
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections that target the vertex in question
:param int n_neurons:
:return: the size of the parameters, in bytes
:rtype: int
@@ -40,7 +36,7 @@ def get_structural_parameters_sdram_usage_in_bytes(
@abstractmethod
def write_structural_parameters(
- self, spec, region, weight_scales, machine_graph, machine_vertex,
+ self, spec, region, weight_scales, app_vertex, vertex_slice,
routing_info, synaptic_matrices):
""" Write structural plasticity parameters
@@ -48,10 +44,10 @@ def write_structural_parameters(
The data specification to write to
:param int region: region ID
:param list(float) weight_scales: Weight scaling for each synapse type
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- The machine graph
- :param AbstractPopulationVertex machine_vertex:
- The machine vertex
+ :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
+ The target application vertex
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the target vertex to generate for
:param ~pacman.model.routing_info.RoutingInfo routing_info:
Routing information for all edges
:param SynapticMatrices synaptic_matrices:
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py
index b9cfb6eb63..87f00e1761 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py
@@ -106,7 +106,7 @@ def get_static_synaptic_data(
fixed_fixed = (
((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") &
0xFFFF) << 16) |
- ((connections["delay"].astype("uint32") & 0xF) <<
+ (connections["delay"].astype("uint32") <<
(n_neuron_id_bits + n_synapse_type_bits)) |
(connections["synapse_type"].astype(
"uint32") << n_neuron_id_bits) |
@@ -168,9 +168,8 @@ def read_static_synaptic_data(
connections["target"] = (
(data & neuron_id_mask) + post_vertex_slice.lo_atom)
connections["weight"] = (data >> 16) & 0xFFFF
- connections["delay"] = (data >> (n_neuron_id_bits +
- n_synapse_type_bits)) & 0xF
- connections["delay"][connections["delay"] == 0] = 16
+ connections["delay"] = (data & 0xFFFF) >> (
+ n_neuron_id_bits + n_synapse_type_bits)
return connections
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py
index 338064db08..ace4f2fb8e 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py
@@ -71,7 +71,7 @@ def __init__(
:param AbstractTimingDependence timing_dependence:
:param AbstractWeightDependence weight_dependence:
:param None voltage_dependence: not supported
- :param float dendritic_delay_fraction: [0.5, 1.0]
+ :param float dendritic_delay_fraction: must be 1.0!
:param float weight:
:param delay: Use ``None`` to get the simulator default minimum delay.
:type delay: float or None
@@ -101,9 +101,8 @@ def __init__(
self.__delay = delay
self.__backprop_delay = backprop_delay
- if not (0.5 <= self.__dendritic_delay_fraction <= 1.0):
- raise NotImplementedError(
- "dendritic_delay_fraction must be in the interval [0.5, 1.0]")
+ if self.__dendritic_delay_fraction != 1.0:
+ raise NotImplementedError("All delays must be dendritic!")
@overrides(AbstractPlasticSynapseDynamics.merge)
def merge(self, synapse_dynamics):
@@ -325,17 +324,10 @@ def get_plastic_synaptic_data(
n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
neuron_id_mask = (1 << n_neuron_id_bits) - 1
- dendritic_delays = (
- connections["delay"] * self.__dendritic_delay_fraction)
- axonal_delays = (
- connections["delay"] * (1.0 - self.__dendritic_delay_fraction))
-
# Get the fixed data
fixed_plastic = (
- ((dendritic_delays.astype("uint16") & 0xF) <<
+ (connections["delay"].astype("uint16") <<
(n_neuron_id_bits + n_synapse_type_bits)) |
- ((axonal_delays.astype("uint16") & 0xF) <<
- (4 + n_neuron_id_bits + n_synapse_type_bits)) |
(connections["synapse_type"].astype("uint16")
<< n_neuron_id_bits) |
((connections["target"].astype("uint16") -
@@ -450,9 +442,8 @@ def read_plastic_synaptic_data(
connections["target"] = (
(data_fixed & neuron_id_mask) + post_vertex_slice.lo_atom)
connections["weight"] = pp_half_words
- connections["delay"] = (data_fixed >> (
- n_neuron_id_bits + n_synapse_type_bits)) & 0xF
- connections["delay"][connections["delay"] == 0] = 16
+ connections["delay"] = data_fixed >> (
+ n_neuron_id_bits + n_synapse_type_bits)
return connections
@overrides(AbstractPlasticSynapseDynamics.get_weight_mean)
@@ -529,7 +520,7 @@ def gen_matrix_params(self):
return numpy.array([
self._n_header_bytes // BYTES_PER_SHORT,
synapse_struct.get_n_half_words_per_connection(),
- synapse_struct.get_weight_half_word()], dtype="uint32")
+ synapse_struct.get_weight_half_word()], dtype=numpy.uint32)
@property
@overrides(AbstractGenerateOnMachine.
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_common.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_common.py
index b219218fd3..bbe8976949 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_common.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_common.py
@@ -19,17 +19,11 @@
AbstractBase, abstractmethod, abstractproperty)
from spinn_utilities.overrides import overrides
from data_specification.enums.data_type import DataType
-from pacman.model.graphs.application import (
- ApplicationGraph, ApplicationVertex)
-from pacman.model.graphs.machine import (MachineGraph, MachineVertex)
-from pacman.exceptions import PacmanInvalidParameterException
from spinn_front_end_common.utilities.constants import (
MICRO_TO_MILLISECOND_CONVERSION, MICRO_TO_SECOND_CONVERSION,
BYTES_PER_WORD, BYTES_PER_SHORT)
from spinn_front_end_common.utilities.globals_variables import (
machine_time_step)
-from spynnaker.pyNN.models.neural_projections import (
- ProjectionApplicationEdge)
from .abstract_synapse_dynamics_structural import (
AbstractSynapseDynamicsStructural)
from spynnaker.pyNN.exceptions import SynapticConfigurationException
@@ -90,112 +84,83 @@ def p_rew(self):
"""
return 1. / self.f_rew
- @overrides(AbstractSynapseDynamicsStructural.write_structural_parameters,
- extend_doc=False)
+ @overrides(AbstractSynapseDynamicsStructural.write_structural_parameters)
def write_structural_parameters(
- self, spec, region, weight_scales, machine_graph, machine_vertex,
- routing_info, synaptic_matrices):
- """ Write structural plasticity parameters
-
- :param ~data_specification.DataSpecificationGenerator spec:
- the data spec
- :param int region: region ID
- :param weight_scales: scaling the weights
- :type weight_scales: ~numpy.ndarray or list(float)
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- Full machine level network
- :param AbstractPopulationVertex machine_vertex:
- the vertex for which data specs are being prepared
- :param ~pacman.model.routing_info.RoutingInfo routing_info:
- All of the routing information on the network
- :param SynapticMatrices synaptic_matrices:
- The synaptic matrices for this vertex
- """
+ self, spec, region, weight_scales, app_vertex,
+ vertex_slice, routing_info, synaptic_matrices):
spec.comment("Writing structural plasticity parameters")
spec.switch_write_focus(region)
# Get relevant edges
- structural_edges, machine_edges_by_app = (
- self.__get_structural_edges_by_machine(
- machine_graph, machine_vertex))
+ structural_projections = self.__get_structural_projections(
+ app_vertex.incoming_projections)
# Write the common part of the rewiring data
self.__write_common_rewiring_data(
- spec, machine_vertex, len(structural_edges))
+ spec, app_vertex, vertex_slice,
+ len(structural_projections))
# Write the pre-population info
pop_index = self.__write_prepopulation_info(
- spec, machine_vertex, structural_edges, machine_edges_by_app,
- routing_info, weight_scales, synaptic_matrices)
+ spec, app_vertex, structural_projections, routing_info,
+ weight_scales, synaptic_matrices, vertex_slice)
# Write the post-to-pre table
- self.__write_post_to_pre_table(spec, pop_index, machine_vertex)
+ self.__write_post_to_pre_table(
+ spec, pop_index, app_vertex, vertex_slice)
# Write the component parameters
# pylint: disable=no-member
+ spec.comment("Writing partner selection parameters")
self.partner_selection.write_parameters(spec)
- for synapse_info in structural_edges.values():
- dynamics = synapse_info.synapse_dynamics
+ for proj in structural_projections:
+ spec.comment("Writing formation parameters for {}".format(
+ proj.label))
+ dynamics = proj._synapse_information.synapse_dynamics
dynamics.formation.write_parameters(spec)
- for synapse_info in structural_edges.values():
- dynamics = synapse_info.synapse_dynamics
+ for proj in structural_projections:
+ spec.comment("Writing elimination parameters for {}".format(
+ proj.label))
+ dynamics = proj._synapse_information.synapse_dynamics
dynamics.elimination.write_parameters(
- spec, weight_scales[synapse_info.synapse_type])
+ spec, weight_scales[proj._synapse_information.synapse_type])
- def __get_structural_edges_by_app(self, app_graph, app_vertex):
- """
- :param ~pacman.model.graphs.application.ApplicationGraph app_graph:
- :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
- :rtype: dict(ProjectionApplicationEdge, SynapseInformation)
- """
- structural_edges = dict()
- for app_edge in app_graph.get_edges_ending_at_vertex(app_vertex):
- if isinstance(app_edge, ProjectionApplicationEdge):
- for synapse_info in app_edge.synapse_information:
- if isinstance(synapse_info.synapse_dynamics,
- AbstractSynapseDynamicsStructural):
- if app_edge in structural_edges:
- raise SynapticConfigurationException(
- self.PAIR_ERROR)
- structural_edges[app_edge] = synapse_info
- return structural_edges
-
- def __get_structural_edges_by_machine(self, machine_graph, machine_vertex):
+ def __get_structural_projections(self, incoming_projections):
"""
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- :rtype: dict(ProjectionApplicationEdge, SynapseInformation)
+ :param list(Projection) incoming_projections:
+ Projections to filter to structural only
+ :rtype: list(Projection)
"""
- structural_edges = collections.OrderedDict()
- machine_edges = collections.defaultdict(list)
- for machine_edge in machine_graph.get_edges_ending_at_vertex(
- machine_vertex):
- app_edge = machine_edge.app_edge
- if isinstance(app_edge, ProjectionApplicationEdge):
- for synapse_info in app_edge.synapse_information:
- if isinstance(synapse_info.synapse_dynamics,
- AbstractSynapseDynamicsStructural):
- if app_edge in structural_edges:
- if structural_edges[app_edge] != synapse_info:
- raise SynapticConfigurationException(
- self.PAIR_ERROR)
- else:
- structural_edges[app_edge] = synapse_info
- machine_edges[app_edge].append(machine_edge)
- return structural_edges, machine_edges
+ structural_projections = list()
+ seen_app_edges = set()
+ for proj in incoming_projections:
+ app_edge = proj._projection_edge
+ for synapse_info in app_edge.synapse_information:
+ if isinstance(synapse_info.synapse_dynamics,
+ AbstractSynapseDynamicsStructural):
+ if app_edge in seen_app_edges:
+ raise SynapticConfigurationException(
+ self.PAIR_ERROR)
+ else:
+ seen_app_edges.add(app_edge)
+ structural_projections.append(proj)
+ return structural_projections
def __write_common_rewiring_data(
- self, spec, machine_vertex, n_pre_pops):
+ self, spec, app_vertex, vertex_slice, n_pre_pops):
""" Write the non-sub-population synapse parameters to the spec.
:param ~data_specification.DataSpecificationGenerator spec:
the data spec
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- the vertex for which data specs are being prepared
+ :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
+ The application vertex being generated
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The slice of the target vertex to generate for
:param int n_pre_pops: the number of pre-populations
:return: None
:rtype: None
"""
+ spec.comment("Writing common rewiring data")
if (self.p_rew * MICRO_TO_MILLISECOND_CONVERSION <
machine_time_step() / MICRO_TO_MILLISECOND_CONVERSION):
# Fast rewiring
@@ -212,13 +177,11 @@ def __write_common_rewiring_data(
# write s_max
spec.write_value(data=int(self.s_max))
# write total number of atoms in the application vertex
- app_vertex = machine_vertex.app_vertex
spec.write_value(data=app_vertex.n_atoms)
# write local low, high and number of atoms
- post_slice = machine_vertex.vertex_slice
- spec.write_value(data=post_slice.n_atoms)
- spec.write_value(data=post_slice.lo_atom)
- spec.write_value(data=post_slice.hi_atom)
+ spec.write_value(data=vertex_slice.n_atoms)
+ spec.write_value(data=vertex_slice.lo_atom)
+ spec.write_value(data=vertex_slice.hi_atom)
# write with_replacement
spec.write_value(data=self.with_replacement)
@@ -232,14 +195,15 @@ def __write_common_rewiring_data(
spec.write_value(data=n_pre_pops)
def __write_prepopulation_info(
- self, spec, machine_vertex, structural_edges, machine_edges_by_app,
- routing_info, weight_scales, synaptic_matrices):
+ self, spec, app_vertex, structural_projections, routing_info,
+ weight_scales, synaptic_matrices, post_vertex_slice):
"""
:param ~data_specification.DataSpecificationGenerator spec:
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
+ :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
the vertex for which data specs are being prepared
:param list(tuple(ProjectionApplicationEdge,SynapseInformation)) \
- structural_edges:
+ structural_projections:
+ Projections that are structural
:param machine_edges_by_app:
map of app edge to associated machine edges
:type machine_edges_by_app:
@@ -250,18 +214,27 @@ def __write_prepopulation_info(
:param SynapticMatrices synaptic_matrices:
:rtype: dict(tuple(AbstractPopulationVertex,SynapseInformation),int)
"""
+ spec.comment("Writing pre-population info")
pop_index = dict()
index = 0
- for app_edge, synapse_info in structural_edges.items():
+ for proj in structural_projections:
+ spec.comment("Writing pre-population info for {}".format(
+ proj.label))
+ app_edge = proj._projection_edge
+ synapse_info = proj._synapse_information
pop_index[app_edge.pre_vertex, synapse_info] = index
index += 1
- machine_edges = machine_edges_by_app[app_edge]
dynamics = synapse_info.synapse_dynamics
+ machine_edges = list()
+ for machine_edge in app_edge.machine_edges:
+ if machine_edge.post_vertex.vertex_slice == post_vertex_slice:
+ machine_edges.append(machine_edge)
+
# Number of machine edges
spec.write_value(len(machine_edges), data_type=DataType.UINT16)
# Controls - currently just if this is a self connection or not
- self_connected = machine_vertex.app_vertex == app_edge.pre_vertex
+ self_connected = app_vertex == app_edge.pre_vertex
spec.write_value(int(self_connected), data_type=DataType.UINT16)
# Delay
delay_scale = (
@@ -296,21 +269,22 @@ def __write_prepopulation_info(
app_edge, synapse_info, machine_edge))
return pop_index
- def __write_post_to_pre_table(self, spec, pop_index, machine_vertex):
+ def __write_post_to_pre_table(
+ self, spec, pop_index, app_vertex, vertex_slice):
""" Post to pre table is basically the transpose of the synaptic\
matrix.
:param ~data_specification.DataSpecificationGenerator spec:
:param dict(tuple(AbstractPopulationVertex,SynapseInformation),int) \
pop_index:
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
+ :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
the vertex for which data specs are being prepared
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ The target slice
"""
# pylint: disable=unsubscriptable-object
# Get connections for this post slice
- post_slice = machine_vertex.vertex_slice
- slice_conns = self.connections[
- machine_vertex.app_vertex, post_slice.lo_atom]
+ slice_conns = self.connections[app_vertex, vertex_slice.lo_atom]
# Make a single large array of connections
connections = numpy.concatenate(
[conn for (conn, _, _, _) in slice_conns])
@@ -331,7 +305,7 @@ def __write_post_to_pre_table(self, spec, pop_index, machine_vertex):
[m_edge.pre_vertex.vertex_slice.lo_atom
for (_, _, m_edge, _) in slice_conns], conn_lens)
connections["source"] = connections["source"] - lo_atoms
- connections["target"] = connections["target"] - post_slice.lo_atom
+ connections["target"] = connections["target"] - vertex_slice.lo_atom
# Make an array of all data required
conn_data = numpy.dstack(
@@ -339,7 +313,7 @@ def __write_post_to_pre_table(self, spec, pop_index, machine_vertex):
# Break data into rows based on target and strip target out
rows = [conn_data[connections["target"] == i]
- for i in range(0, post_slice.n_atoms)]
+ for i in range(0, vertex_slice.n_atoms)]
if any(len(row) > self.s_max for row in rows):
raise Exception("Too many initial connections per incoming neuron")
@@ -352,50 +326,42 @@ def __write_post_to_pre_table(self, spec, pop_index, machine_vertex):
# Finally make the table and write it out
post_to_pre = numpy.core.records.fromarrays(
numpy.concatenate(padded_rows).T, formats="u1, u1, u2").view("u4")
+ if len(post_to_pre) != vertex_slice.n_atoms * self.s_max:
+ raise Exception(
+ "Wrong size of pre-to-pop tables: {} Found, {} Expected"
+ .format(len(post_to_pre), vertex_slice.n_atoms * self.s_max))
+ spec.comment("Writing post-to-pre table of {} words".format(
+ vertex_slice.n_atoms * self.s_max))
spec.write_array(post_to_pre)
@overrides(AbstractSynapseDynamicsStructural.
get_structural_parameters_sdram_usage_in_bytes)
def get_structural_parameters_sdram_usage_in_bytes(
- self, graph, vertex, n_neurons):
+ self, incoming_projections, n_neurons):
# Work out how many sub-edges we will end up with, as this is used
# for key_atom_info
- n_sub_edges = 0
- if (isinstance(graph, ApplicationGraph) and
- isinstance(vertex, ApplicationVertex)):
- structural_edges = self.__get_structural_edges_by_app(
- graph, vertex)
- machine_edges_by_app = None
- elif (isinstance(graph, MachineGraph) and
- isinstance(vertex, MachineVertex)):
- structural_edges, machine_edges_by_app = \
- self.__get_structural_edges_by_machine(graph, vertex)
- else:
- raise PacmanInvalidParameterException(
- "vertex", vertex, "Not at the same level as graph")
- # Also keep track of the parameter sizes
-
# pylint: disable=no-member
param_sizes = (
self.partner_selection.get_parameters_sdram_usage_in_bytes())
- for (app_edge, synapse_info) in structural_edges.items():
- if machine_edges_by_app:
- n_sub_edges += len(machine_edges_by_app[app_edge])
- else:
- slices, _ = (
- app_edge.pre_vertex.splitter.get_out_going_slices())
- n_sub_edges = len(slices)
- dynamics = synapse_info.synapse_dynamics
+ n_sub_edges = 0
+ structural_projections = self.__get_structural_projections(
+ incoming_projections)
+ for proj in structural_projections:
+ dynamics = proj._synapse_information.synapse_dynamics
+ app_edge = proj._projection_edge
+ n_sub_edges += len(
+ app_edge.pre_vertex.splitter.get_out_going_slices()[0])
param_sizes += dynamics.formation\
.get_parameters_sdram_usage_in_bytes()
param_sizes += dynamics.elimination\
.get_parameters_sdram_usage_in_bytes()
- return int((self._REWIRING_DATA_SIZE +
- (self._PRE_POP_INFO_BASE_SIZE * len(structural_edges)) +
- (self._KEY_ATOM_INFO_SIZE * n_sub_edges) +
- (self._POST_TO_PRE_ENTRY_SIZE * n_neurons * self.s_max) +
- param_sizes))
+ return int(
+ self._REWIRING_DATA_SIZE +
+ (self._PRE_POP_INFO_BASE_SIZE * len(structural_projections)) +
+ (self._KEY_ATOM_INFO_SIZE * n_sub_edges) +
+ (self._POST_TO_PRE_ENTRY_SIZE * n_neurons * self.s_max) +
+ param_sizes)
def get_vertex_executable_suffix(self):
"""
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py
index deac883c1b..45507bc475 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py
@@ -267,6 +267,20 @@ def get_weight_maximum(self, connector, synapse_info):
w_m = super().get_weight_maximum(connector, synapse_info)
return max(w_m, self.__initial_weight)
+ @overrides(SynapseDynamicsStatic.get_delay_maximum)
+ def get_delay_maximum(self, connector, synapse_info):
+ d_m = super().get_delay_maximum(connector, synapse_info)
+ return max(d_m, self.__initial_delay)
+
+ @overrides(SynapseDynamicsStatic.get_delay_minimum)
+ def get_delay_minimum(self, connector, synapse_info):
+ d_m = super().get_delay_minimum(connector, synapse_info)
+ return min(d_m, self.__initial_delay)
+
+ @overrides(SynapseDynamicsStatic.get_delay_variance)
+ def get_delay_variance(self, connector, delays, synapse_info):
+ return 0.0
+
@overrides(_Common.get_seeds)
def get_seeds(self, app_vertex=None):
if app_vertex:
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py
index 3879ecf211..814e96b5b9 100644
--- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py
+++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py
@@ -267,6 +267,20 @@ def get_weight_maximum(self, connector, synapse_info):
w_max = super().get_weight_maximum(connector, synapse_info)
return max(w_max, self.__initial_weight)
+ @overrides(SynapseDynamicsSTDP.get_delay_maximum)
+ def get_delay_maximum(self, connector, synapse_info):
+ d_m = super().get_delay_maximum(connector, synapse_info)
+ return max(d_m, self.__initial_delay)
+
+ @overrides(SynapseDynamicsSTDP.get_delay_minimum)
+ def get_delay_minimum(self, connector, synapse_info):
+ d_m = super().get_delay_minimum(connector, synapse_info)
+ return min(d_m, self.__initial_delay)
+
+ @overrides(SynapseDynamicsSTDP.get_delay_variance)
+ def get_delay_variance(self, connector, delays, synapse_info):
+ return 0.0
+
@overrides(SynapseDynamicsStructuralCommon.get_seeds)
def get_seeds(self, app_vertex=None):
if app_vertex:
diff --git a/spynnaker/pyNN/models/neuron/synapse_io.py b/spynnaker/pyNN/models/neuron/synapse_io.py
index 7a515d95f0..69480fae7b 100644
--- a/spynnaker/pyNN/models/neuron/synapse_io.py
+++ b/spynnaker/pyNN/models/neuron/synapse_io.py
@@ -13,7 +13,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import math
import numpy
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
@@ -23,8 +22,8 @@
AbstractConnector)
from spynnaker.pyNN.exceptions import SynapseRowTooBigException
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
- AbstractStaticSynapseDynamics, AbstractSynapseDynamicsStructural,
- AbstractSynapseDynamics)
+ AbstractStaticSynapseDynamics, AbstractSynapseDynamics)
+from .master_pop_table import MasterPopTableAsBinarySearch
_N_HEADER_WORDS = 3
# There are 16 slots, one per time step
@@ -91,7 +90,7 @@ def delayed_max_n_synapses(self):
@property
def undelayed_max_bytes(self):
- """ Maximum number of bytes, including headers, in a row of the\
+ """ Maximum number of bytes, including headers, in a row of the
undelayed matrix
:rtype: int
@@ -100,7 +99,7 @@ def undelayed_max_bytes(self):
@property
def delayed_max_bytes(self):
- """ Maximum number of bytes, including headers, in a row of the\
+ """ Maximum number of bytes, including headers, in a row of the
delayed matrix
:rtype: int
@@ -109,7 +108,7 @@ def delayed_max_bytes(self):
@property
def undelayed_max_words(self):
- """ Maximum number of words, excluding headers, in a row of the\
+ """ Maximum number of words, excluding headers, in a row of the
undelayed matrix
:rtype: int
@@ -118,7 +117,7 @@ def undelayed_max_words(self):
@property
def delayed_max_words(self):
- """ Maximum number of words, excluding headers, in a row of the\
+ """ Maximum number of words, excluding headers, in a row of the
undelayed matrix
:rtype: int
@@ -126,641 +125,596 @@ def delayed_max_words(self):
return self.__delayed_max_words
-class SynapseIORowBased(object):
- """ A SynapseRowIO implementation that uses a row for each source neuron,\
- where each row consists of a fixed region, a plastic region, and a\
- fixed-plastic region (this is the bits of the plastic row that don't\
- actually change). The plastic region structure is determined by the\
- synapse dynamics of the connector.
- """
- __slots__ = []
-
- def get_maximum_delay_supported_in_ms(
- self, post_vertex_max_delay_ticks):
- """ Get the maximum delay supported by the synapse representation \
- before extensions are required, or None if any delay is supported
+def get_maximum_delay_supported_in_ms(post_vertex_max_delay_ticks):
+ """ Get the maximum delay supported by the synapse representation \
+ before extensions are required, or None if any delay is supported
- :param int post_vertex_max_delay_ticks: post vertex max delay
- :rtype: int
- """
- return post_vertex_max_delay_ticks * machine_time_step_ms()
-
- @staticmethod
- def _n_words(n_bytes):
- """ Get the number of words in a given number of bytes
-
- :param int n_bytes: The number of bytes
- :rtype: int
- """
- return math.ceil(float(n_bytes) / BYTES_PER_WORD)
-
- @staticmethod
- def _get_allowed_row_length(
- n_words, dynamics, population_table, in_edge, n_synapses):
- """ Get the allowed row length in words in the population table for a\
- desired row length in words
-
- :param int n_words: The number of words in the row
- :param AbstractSynapseDynamics dynamics: The synapse dynamics used
- :param MasterPopTableAsBinarySearch population_table:
- The population table that holds the row lengths
- :param ProjectionApplicationEdge in_edge: The incoming edge
- :param int n_synapses: The number of synapses for the number of words
- :raises SynapseRowTooBigException:
- If the given row is too big. The exception will detail the
- """
- if n_words == 0:
- return 0
- try:
- return population_table.get_allowed_row_length(n_words)
- except SynapseRowTooBigException as e:
- # Find the number of synapses available for the maximum population
- # table size, as extracted from the exception
- max_synapses = dynamics.get_max_synapses(e.max_size)
- raise SynapseRowTooBigException(
- max_synapses,
- "The connection between {} and {} has more synapses ({}) than"
- " can currently be supported on this implementation of PyNN"
- " ({} for this connection type)."
- " Please reduce the size of the target population, or reduce"
- " the number of neurons per core.".format(
- in_edge.pre_vertex, in_edge.post_vertex, n_synapses,
- max_synapses)) from e
-
- def get_max_row_info(
- self, synapse_info, post_vertex_slice, n_delay_stages,
- population_table, in_edge):
- """ Get the information about the maximum lengths of delayed and\
- undelayed rows in bytes (including header), words (without header)\
- and number of synapses
-
- :param SynapseInformation synapse_info:
- The synapse information to get the row data for
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The slice of the machine vertex being represented
- :param int n_delay_stages:
- The number of delay stages on the edge
- :param MasterPopTableAsBinarySearch population_table:
- The population table to be used
- :param ProjectionApplicationEdge in_edge:
- The incoming edge on which the synapse information is held
- :rtype: MaxRowInfo
- :raises SynapseRowTooBigException:
- If the synapse information can't be represented
- """
- max_delay_supported = self.get_maximum_delay_supported_in_ms(
- in_edge.post_vertex.splitter.max_support_delay())
- max_delay = max_delay_supported * (n_delay_stages + 1)
- pad_to_length = synapse_info.synapse_dynamics.pad_to_length
-
- # delay point where delay extensions start
- min_delay_for_delay_extension = (
- max_delay_supported + numpy.finfo(numpy.double).tiny)
-
- # row length for the non-delayed synaptic matrix
- max_undelayed_n_synapses = synapse_info.connector \
+ :param int post_vertex_max_delay_ticks: post vertex max delay
+ :rtype: int
+ """
+ return post_vertex_max_delay_ticks * machine_time_step_ms()
+
+
+def get_max_row_info(
+ synapse_info, post_vertex_slice, n_delay_stages, in_edge):
+ """ Get the information about the maximum lengths of delayed and\
+ undelayed rows in bytes (including header), words (without header)\
+ and number of synapses
+
+ :param SynapseInformation synapse_info:
+ The synapse information to get the row data for
+ :param ~pacman.model.graphs.common.Slice post_vertex_slice:
+ The slice of the machine vertex being represented
+ :param int n_delay_stages:
+ The number of delay stages on the edge
+ :param ProjectionApplicationEdge in_edge:
+ The incoming edge on which the synapse information is held
+ :rtype: MaxRowInfo
+ :raises SynapseRowTooBigException:
+ If the synapse information can't be represented
+ """
+ max_delay_supported = get_maximum_delay_supported_in_ms(
+ in_edge.post_vertex.splitter.max_support_delay())
+ max_delay = max_delay_supported * (n_delay_stages + 1)
+ pad_to_length = synapse_info.synapse_dynamics.pad_to_length
+
+ # delay point where delay extensions start
+ min_delay_for_delay_extension = (
+ max_delay_supported + numpy.finfo(numpy.double).tiny)
+
+ # row length for the non-delayed synaptic matrix
+ max_undelayed_n_synapses = synapse_info.connector \
+ .get_n_connections_from_pre_vertex_maximum(
+ post_vertex_slice, synapse_info, 0, max_delay_supported)
+ if pad_to_length is not None:
+ max_undelayed_n_synapses = max(
+ pad_to_length, max_undelayed_n_synapses)
+
+ # determine the max row length in the delay extension
+ max_delayed_n_synapses = 0
+ if n_delay_stages > 0:
+ max_delayed_n_synapses = synapse_info.connector \
.get_n_connections_from_pre_vertex_maximum(
- post_vertex_slice, synapse_info, 0, max_delay_supported)
+ post_vertex_slice, synapse_info,
+ min_delay_for_delay_extension, max_delay)
if pad_to_length is not None:
- max_undelayed_n_synapses = max(
- pad_to_length, max_undelayed_n_synapses)
-
- # determine the max row length in the delay extension
- max_delayed_n_synapses = 0
- if n_delay_stages > 0:
- max_delayed_n_synapses = synapse_info.connector \
- .get_n_connections_from_pre_vertex_maximum(
- post_vertex_slice, synapse_info,
- min_delay_for_delay_extension, max_delay)
- if pad_to_length is not None:
- max_delayed_n_synapses = max(
- pad_to_length, max_delayed_n_synapses)
-
- # Get the row sizes
- dynamics = synapse_info.synapse_dynamics
- if isinstance(dynamics, AbstractStaticSynapseDynamics):
- undelayed_n_words = dynamics.get_n_words_for_static_connections(
- max_undelayed_n_synapses)
- delayed_n_words = dynamics.get_n_words_for_static_connections(
- max_delayed_n_synapses)
- else:
- undelayed_n_words = dynamics.get_n_words_for_plastic_connections(
- max_undelayed_n_synapses)
- delayed_n_words = dynamics.get_n_words_for_plastic_connections(
- max_delayed_n_synapses)
-
- # Adjust for the allowed row lengths from the population table
- undelayed_max_n_words = self._get_allowed_row_length(
- undelayed_n_words, dynamics, population_table, in_edge,
+ max_delayed_n_synapses = max(
+ pad_to_length, max_delayed_n_synapses)
+
+ # Get the row sizes
+ dynamics = synapse_info.synapse_dynamics
+ if isinstance(dynamics, AbstractStaticSynapseDynamics):
+ undelayed_n_words = dynamics.get_n_words_for_static_connections(
max_undelayed_n_synapses)
- delayed_max_n_words = self._get_allowed_row_length(
- delayed_n_words, dynamics, population_table, in_edge,
+ delayed_n_words = dynamics.get_n_words_for_static_connections(
+ max_delayed_n_synapses)
+ else:
+ undelayed_n_words = dynamics.get_n_words_for_plastic_connections(
+ max_undelayed_n_synapses)
+ delayed_n_words = dynamics.get_n_words_for_plastic_connections(
max_delayed_n_synapses)
- undelayed_max_bytes = 0
- if undelayed_max_n_words > 0:
- undelayed_max_bytes = (
- undelayed_max_n_words + _N_HEADER_WORDS) * BYTES_PER_WORD
- delayed_max_bytes = 0
- if delayed_max_n_words > 0:
- delayed_max_bytes = (
- delayed_max_n_words + _N_HEADER_WORDS) * BYTES_PER_WORD
-
- return MaxRowInfo(
- max_undelayed_n_synapses, max_delayed_n_synapses,
- undelayed_max_bytes, delayed_max_bytes,
- undelayed_max_n_words, delayed_max_n_words)
-
- @staticmethod
- def _get_row_data(
+ # Adjust for the allowed row lengths from the population table
+ undelayed_max_n_words = _get_allowed_row_length(
+ undelayed_n_words, dynamics, in_edge, max_undelayed_n_synapses)
+ delayed_max_n_words = _get_allowed_row_length(
+ delayed_n_words, dynamics, in_edge, max_delayed_n_synapses)
+
+ undelayed_max_bytes = 0
+ if undelayed_max_n_words > 0:
+ undelayed_max_bytes = (
+ undelayed_max_n_words + _N_HEADER_WORDS) * BYTES_PER_WORD
+ delayed_max_bytes = 0
+ if delayed_max_n_words > 0:
+ delayed_max_bytes = (
+ delayed_max_n_words + _N_HEADER_WORDS) * BYTES_PER_WORD
+
+ return MaxRowInfo(
+ max_undelayed_n_synapses, max_delayed_n_synapses,
+ undelayed_max_bytes, delayed_max_bytes,
+ undelayed_max_n_words, delayed_max_n_words)
+
+
+def _get_allowed_row_length(n_words, dynamics, in_edge, n_synapses):
+ """ Get the allowed row length in words in the population table for a
+ desired row length in words
+
+ :param int n_words: The number of words in the row
+ :param AbstractSynapseDynamics dynamics: The synapse dynamics used
+ :param ProjectionApplicationEdge in_edge: The incoming edge
+ :param int n_synapses: The number of synapses for the number of words
+ :raises SynapseRowTooBigException:
+ If the given row is too big; the exception will detail the maximum
+ number of synapses that are supported.
+ """
+ if n_words == 0:
+ return 0
+ try:
+ return MasterPopTableAsBinarySearch.get_allowed_row_length(n_words)
+ except SynapseRowTooBigException as e:
+ # Find the number of synapses available for the maximum population
+ # table size, as extracted from the exception
+ max_synapses = dynamics.get_max_synapses(e.max_size)
+ raise SynapseRowTooBigException(
+ max_synapses,
+ "The connection between {} and {} has more synapses ({}) than"
+ " can currently be supported on this implementation of PyNN"
+ " ({} for this connection type)."
+ " Please reduce the size of the target population, or reduce"
+ " the number of neurons per core.".format(
+ in_edge.pre_vertex, in_edge.post_vertex, n_synapses,
+ max_synapses)) from e
+
+
+def get_synapses(
+ connections, synapse_info, n_delay_stages, n_synapse_types,
+ weight_scales, app_edge, pre_vertex_slice, post_vertex_slice,
+ max_row_info, gen_undelayed, gen_delayed):
+ """ Get the synapses as an array of words for non-delayed synapses and\
+ an array of words for delayed synapses. This is used to prepare\
+ information for *deployment to SpiNNaker*.
+
+ :param ~numpy.ndarray connections:
+ The connections to get the synapses from
+ :param SynapseInformation synapse_info:
+ The synapse information to convert to synapses
+ :param int n_delay_stages:
+ The number of delay stages in total to be represented
+ :param int n_synapse_types:
+ The number of synapse types in total to be represented
+ :param list(float) weight_scales:
+ The scaling of the weights for each synapse type
+ :param ~pacman.model.graphs.appplication.ApplicationEdge app_edge:
+ The incoming machine edge that the synapses are on
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice of the pre-vertex to get the synapses for
+ :param ~pacman.model.graphs.common.Slice post_vertex_slice:
+ The slice of the post-vertex to get the synapses for
+ :param MaxRowInfo max_row_info:
+ The maximum row information for the synapses
+ :param bool gen_undelayed:
+ Whether to generate undelayed data
+ :param bool gen_delayed:
+ Whether to generate delayed data
+ :return:
+ (``row_data``, ``delayed_row_data``, ``delayed_source_ids``,
+ ``stages``) where:
+
+ * ``row_data`` is the undelayed connectivity data arranged into a
+ row per source, each row the same length
+ * ``delayed_row_data`` is the delayed connectivity data arranged
+ into a row per source per delay stage, each row the same length
+ * ``delayed_source_ids`` is the machine-vertex-local source neuron
+ id of each connection of the delayed vertices
+ * ``stages`` is the delay stage of each delayed connection
+ :rtype:
+ tuple(~numpy.ndarray, ~numpy.ndarray, ~numpy.ndarray,
+ ~numpy.ndarray)
+ """
+ # pylint: disable=too-many-arguments, too-many-locals
+ # pylint: disable=assignment-from-no-return
+ # Get delays in timesteps
+ max_delay = app_edge.post_vertex.splitter.max_support_delay()
+
+ # Convert delays to timesteps
+ connections["delay"] = numpy.rint(
+ connections["delay"] * machine_time_step_per_ms())
+
+ # Scale weights
+ connections["weight"] = (connections["weight"] * weight_scales[
+ synapse_info.synapse_type])
+
+ # Split the connections up based on the delays
+ if max_delay is not None:
+ plastic_delay_mask = (connections["delay"] <= max_delay)
+ undelayed_connections = connections[
+ numpy.where(plastic_delay_mask)]
+ delayed_connections = connections[
+ numpy.where(~plastic_delay_mask)]
+ else:
+ undelayed_connections = connections
+ delayed_connections = numpy.zeros(
+ 0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
+
+ # Get the data for the connections
+ row_data = numpy.zeros(0, dtype="uint32")
+ if gen_undelayed and max_row_info.undelayed_max_n_synapses:
+ # Get which row each connection will go into
+ undelayed_row_indices = (
+ undelayed_connections["source"] - pre_vertex_slice.lo_atom)
+ row_data = _get_row_data(
+ undelayed_connections, undelayed_row_indices,
+ pre_vertex_slice.n_atoms, post_vertex_slice, n_synapse_types,
+ synapse_info.synapse_dynamics,
+ max_row_info.undelayed_max_n_synapses,
+ max_row_info.undelayed_max_words)
+
+ del undelayed_row_indices
+ del undelayed_connections
+
+ # Get the data for the delayed connections
+ delayed_row_data = numpy.zeros(0, dtype="uint32")
+ stages = numpy.zeros(0, dtype="uint32")
+ delayed_source_ids = numpy.zeros(0, dtype="uint32")
+ if gen_delayed and max_row_info.delayed_max_n_synapses:
+ # Get the delay stages and which row each delayed connection will
+ # go into
+ stages = numpy.floor((numpy.round(
+ delayed_connections["delay"] - 1.0)) / max_delay).astype(
+ "uint32")
+ delayed_row_indices = (
+ (delayed_connections[
+ "source"] - pre_vertex_slice.lo_atom) +
+ ((stages - 1) * pre_vertex_slice.n_atoms))
+ delayed_connections["delay"] -= max_delay * stages
+ delayed_source_ids = (
+ delayed_connections["source"] - pre_vertex_slice.lo_atom)
+
+ # Get the data
+ delayed_row_data = _get_row_data(
+ delayed_connections, delayed_row_indices,
+ pre_vertex_slice.n_atoms * n_delay_stages, post_vertex_slice,
+ n_synapse_types, synapse_info.synapse_dynamics,
+ max_row_info.delayed_max_n_synapses,
+ max_row_info.delayed_max_words)
+ del delayed_row_indices
+ del delayed_connections
+
+ return row_data, delayed_row_data, delayed_source_ids, stages
+
+
+def _get_row_data(
+ connections, row_indices, n_rows, post_vertex_slice,
+ n_synapse_types, synapse_dynamics, max_row_n_synapses,
+ max_row_n_words):
+ """
+ :param ~numpy.ndarray connections:
+ The connections to convert; the dtype is
+ AbstractConnector.NUMPY_SYNAPSES_DTYPE
+ :param ~numpy.ndarray row_indices:
+ The row into which each connection should go; same length as
+ connections
+ :param int n_rows: The total number of rows
+ :param ~pacman.model.graphs.common.Slice post_vertex_slice:
+ The slice of the post vertex to get the data for
+ :param int n_synapse_types: The number of synapse types allowed
+ :param AbstractSynapseDynamics synapse_dynamics:
+ The synapse dynamics of the synapses
+ :param int max_row_n_synapses: The maximum number of synapses in a row
+ :param int max_row_n_words: The maximum number of words in a row
+ :rtype: tuple(int, ~numpy.ndarray)
+ """
+ # pylint: disable=too-many-arguments, too-many-locals
+ row_ids = range(n_rows)
+ ff_data, ff_size = None, None
+ fp_data, pp_data, fp_size, pp_size = None, None, None, None
+ if isinstance(synapse_dynamics, AbstractStaticSynapseDynamics):
+
+ # Get the static data
+ ff_data, ff_size = synapse_dynamics.get_static_synaptic_data(
connections, row_indices, n_rows, post_vertex_slice,
- n_synapse_types, synapse_dynamics, max_row_n_synapses,
- max_row_n_words):
- """
- :param ~numpy.ndarray connections:
- The connections to convert; the dtype is
- AbstractConnector.NUMPY_SYNAPSES_DTYPE
- :param ~numpy.ndarray row_indices:
- The row into which each connection should go; same length as
- connections
- :param int n_rows: The total number of rows
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The slice of the post vertex to get the data for
- :param int n_synapse_types: The number of synapse types allowed
- :param AbstractSynapseDynamics synapse_dynamics:
- The synapse dynamics of the synapses
- :param int max_row_n_synapses: The maximum number of synapses in a row
- :param int max_row_n_words: The maximum number of words in a row
- :rtype: tuple(int, ~numpy.ndarray)
- """
- # pylint: disable=too-many-arguments, too-many-locals
- row_ids = range(n_rows)
- ff_data, ff_size = None, None
- fp_data, pp_data, fp_size, pp_size = None, None, None, None
- if isinstance(synapse_dynamics, AbstractStaticSynapseDynamics):
-
- # Get the static data
- ff_data, ff_size = synapse_dynamics.get_static_synaptic_data(
+ n_synapse_types, max_row_n_synapses)
+
+ # Blank the plastic data
+ fp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
+ pp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
+ fp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
+ pp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
+ else:
+
+ # Blank the static data
+ ff_data = [numpy.zeros(0, dtype="uint32") for _ in row_ids]
+ ff_size = [numpy.zeros(1, dtype="uint32") for _ in row_ids]
+
+ # Get the plastic data
+ fp_data, pp_data, fp_size, pp_size = \
+ synapse_dynamics.get_plastic_synaptic_data(
connections, row_indices, n_rows, post_vertex_slice,
n_synapse_types, max_row_n_synapses)
- # Blank the plastic data
- fp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
- pp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
- fp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
- pp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
- else:
-
- # Blank the static data
- ff_data = [numpy.zeros(0, dtype="uint32") for _ in row_ids]
- ff_size = [numpy.zeros(1, dtype="uint32") for _ in row_ids]
-
- # Get the plastic data
- fp_data, pp_data, fp_size, pp_size = \
- synapse_dynamics.get_plastic_synaptic_data(
- connections, row_indices, n_rows, post_vertex_slice,
- n_synapse_types, max_row_n_synapses)
-
- # Add some padding
- row_lengths = [
- pp_data[i].size + fp_data[i].size + ff_data[i].size
- for i in row_ids]
- padding = [
- numpy.zeros(max_row_n_words - row_length, dtype="uint32")
- for row_length in row_lengths]
-
- # Join the bits into rows
- items_to_join = [
- pp_size, pp_data, ff_size, fp_size, ff_data, fp_data, padding]
- rows = [numpy.concatenate(items) for items in zip(*items_to_join)]
- row_data = numpy.concatenate(rows)
-
- # Return the data
- return row_data
-
- def get_synapses(
- self, synapse_info, n_delay_stages, n_synapse_types, weight_scales,
- machine_edge, max_row_info, gen_undelayed, gen_delayed, app_edge):
- """ Get the synapses as an array of words for non-delayed synapses and\
- an array of words for delayed synapses. This is used to prepare\
- information for *deployment to SpiNNaker*.
-
- :param SynapseInformation synapse_info:
- The synapse information to convert to synapses
- :param int n_delay_stages:
- The number of delay stages in total to be represented
- :param int n_synapse_types:
- The number of synapse types in total to be represented
- :param list(float) weight_scales:
- The scaling of the weights for each synapse type
- :param ~pacman.model.graphs.machine.MachineEdge machine_edge:
- The incoming machine edge that the synapses are on
- :param ProjectionApplicationEdge app_edge:
- :param MaxRowInfo max_row_info:
- The maximum row information for the synapses
- :param bool gen_undelayed:
- Whether to generate undelayed data
- :param bool gen_delayed:
- Whether to generate delayed data
- :return:
- (``row_data``, ``delayed_row_data``, ``delayed_source_ids``,
- ``stages``) where:
-
- * ``row_data`` is the undelayed connectivity data arranged into a
- row per source, each row the same length
- * ``delayed_row_data`` is the delayed connectivity data arranged
- into a row per source per delay stage, each row the same length
- * ``delayed_source_ids`` is the machine-vertex-local source neuron
- id of each connection of the delayed vertices
- * ``stages`` is the delay stage of each delayed connection
- :rtype:
- tuple(~numpy.ndarray, ~numpy.ndarray, ~numpy.ndarray,
- ~numpy.ndarray)
- """
- # pylint: disable=too-many-arguments, too-many-locals
- # pylint: disable=assignment-from-no-return
- # Get delays in timesteps
- max_delay = self.get_maximum_delay_supported_in_ms(
- app_edge.post_vertex.splitter.max_support_delay())
- if max_delay is not None:
- max_delay *= machine_time_step_per_ms()
-
- # Get the actual connections
- app_edge = machine_edge.app_edge
- pre_slices = app_edge.pre_vertex.vertex_slices
- post_slices = app_edge.post_vertex.vertex_slices
- pre_vertex_slice = machine_edge.pre_vertex.vertex_slice
- post_vertex_slice = machine_edge.post_vertex.vertex_slice
- connections = synapse_info.connector.create_synaptic_block(
- pre_slices, post_slices, pre_vertex_slice, post_vertex_slice,
- synapse_info.synapse_type, synapse_info)
-
- # Convert delays to timesteps
- connections["delay"] = numpy.rint(
- connections["delay"] * machine_time_step_per_ms())
-
- # Scale weights
- connections["weight"] = (connections["weight"] * weight_scales[
- synapse_info.synapse_type])
-
- # Set connections for structural plasticity
- if isinstance(synapse_info.synapse_dynamics,
- AbstractSynapseDynamicsStructural):
- synapse_info.synapse_dynamics.set_connections(
- connections, post_vertex_slice, app_edge, synapse_info,
- machine_edge)
-
- # Split the connections up based on the delays
- if max_delay is not None:
- plastic_delay_mask = (connections["delay"] <= max_delay)
- undelayed_connections = connections[
- numpy.where(plastic_delay_mask)]
- delayed_connections = connections[
- numpy.where(~plastic_delay_mask)]
- else:
- undelayed_connections = connections
- delayed_connections = numpy.zeros(
- 0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
- del connections
-
- # Get the data for the connections
- row_data = numpy.zeros(0, dtype="uint32")
- if gen_undelayed and max_row_info.undelayed_max_n_synapses:
- # Get which row each connection will go into
- undelayed_row_indices = (
- undelayed_connections["source"] - pre_vertex_slice.lo_atom)
- row_data = self._get_row_data(
- undelayed_connections, undelayed_row_indices,
- pre_vertex_slice.n_atoms, post_vertex_slice, n_synapse_types,
- synapse_info.synapse_dynamics,
- max_row_info.undelayed_max_n_synapses,
- max_row_info.undelayed_max_words)
-
- del undelayed_row_indices
- del undelayed_connections
-
- # Get the data for the delayed connections
- delayed_row_data = numpy.zeros(0, dtype="uint32")
- stages = numpy.zeros(0, dtype="uint32")
- delayed_source_ids = numpy.zeros(0, dtype="uint32")
- if gen_delayed and max_row_info.delayed_max_n_synapses:
- # Get the delay stages and which row each delayed connection will
- # go into
- stages = numpy.floor((numpy.round(
- delayed_connections["delay"] - 1.0)) / max_delay).astype(
- "uint32")
- delayed_row_indices = (
- (delayed_connections[
- "source"] - pre_vertex_slice.lo_atom) +
- ((stages - 1) * pre_vertex_slice.n_atoms))
- delayed_connections["delay"] -= max_delay * stages
- delayed_source_ids = (
- delayed_connections["source"] - pre_vertex_slice.lo_atom)
-
- # Get the data
- delayed_row_data = self._get_row_data(
- delayed_connections, delayed_row_indices,
- pre_vertex_slice.n_atoms * n_delay_stages, post_vertex_slice,
- n_synapse_types, synapse_info.synapse_dynamics,
- max_row_info.delayed_max_n_synapses,
- max_row_info.delayed_max_words)
- del delayed_row_indices
- del delayed_connections
-
- return row_data, delayed_row_data, delayed_source_ids, stages
-
- @staticmethod
- def _rescale_connections(
- connections, weight_scales, synapse_info):
- """ Scale the connection data into machine values
-
- :param ~numpy.ndarray connections: The connections to be rescaled
- :param list(float) weight_scales: The weight scale of each synapse type
- :param SynapseInformation synapse_info:
- The synapse information of the connections
- """
- # Return the delays values to milliseconds
- connections["delay"] /= machine_time_step_per_ms()
- # Undo the weight scaling
- connections["weight"] /= weight_scales[synapse_info.synapse_type]
- return connections
-
- def convert_to_connections(
- self, synapse_info, pre_vertex_slice, post_vertex_slice,
- max_row_length, n_synapse_types, weight_scales, data,
- delayed, post_vertex_max_delay_ticks):
- """ Read the synapses for a given projection synapse information\
- object out of the given data and convert to connection data
-
- :param SynapseInformation synapse_info:
- The synapse information of the synapses
- :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
- The slice of the source neurons of the synapses in the data
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The slice of the target neurons of the synapses in the data
- :param int max_row_length:
- The length of each row in the data
- :param int n_synapse_types:
- The number of synapse types in total
- :param list(float) weight_scales:
- The weight scaling of each synapse type
- :param bytearray data:
- The raw data containing the synapses
- :param bool delayed: True if the data should be considered delayed
- :param int post_vertex_max_delay_ticks:
+ # Add some padding
+ row_lengths = [
+ pp_data[i].size + fp_data[i].size + ff_data[i].size
+ for i in row_ids]
+ padding = [
+ numpy.zeros(max_row_n_words - row_length, dtype="uint32")
+ for row_length in row_lengths]
+
+ # Join the bits into rows
+ items_to_join = [
+ pp_size, pp_data, ff_size, fp_size, ff_data, fp_data, padding]
+ rows = [numpy.concatenate(items) for items in zip(*items_to_join)]
+ row_data = numpy.concatenate(rows)
+
+ # Return the data
+ return row_data
+
+
+def convert_to_connections(
+ synapse_info, pre_vertex_slice, post_vertex_slice,
+ max_row_length, n_synapse_types, weight_scales, data,
+ delayed, post_vertex_max_delay_ticks):
+ """ Read the synapses for a given projection synapse information\
+ object out of the given data and convert to connection data
+
+ :param SynapseInformation synapse_info:
+ The synapse information of the synapses
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice of the source neurons of the synapses in the data
+ :param ~pacman.model.graphs.common.Slice post_vertex_slice:
+ The slice of the target neurons of the synapses in the data
+ :param int max_row_length:
+ The length of each row in the data
+ :param int n_synapse_types:
+ The number of synapse types in total
+ :param list(float) weight_scales:
+ The weight scaling of each synapse type
+ :param bytearray data:
+ The raw data containing the synapses
+ :param bool delayed: True if the data should be considered delayed
+ :param int post_vertex_max_delay_ticks:
max delayed ticks supported from post vertex
- :return: The connections read from the data; the dtype is
- AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE
- :rtype: ~numpy.ndarray
- """
- # If there is no data, return nothing
- if data is None or not len(data):
- return numpy.zeros(
- 0, dtype=AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE)
-
- # Translate the data into rows
- row_data = numpy.frombuffer(data, dtype=".
-
-import math
-import numpy
-from scipy import special # @UnresolvedImport
-
-from spinn_utilities.progress_bar import ProgressBar
-from data_specification.enums import DataType
-from spinn_utilities.config_holder import (
- get_config_float, get_config_int, get_config_bool)
-from spinn_front_end_common.utilities.constants import (
- BYTES_PER_WORD, MICRO_TO_SECOND_CONVERSION)
-from spinn_front_end_common.utilities.globals_variables import (
- machine_time_step)
-from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge
-from spynnaker.pyNN.models.abstract_models import AbstractMaxSpikes
-from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased
-from spynnaker.pyNN.utilities.constants import (
- POPULATION_BASED_REGIONS, POSSION_SIGMA_SUMMATION_LIMIT)
-from spynnaker.pyNN.utilities.utility_calls import (get_n_bits)
-from spynnaker.pyNN.utilities.running_stats import RunningStats
-from .synapse_dynamics import (
- AbstractSynapseDynamics, AbstractSynapseDynamicsStructural)
-from .synaptic_matrices import SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES
-from .synaptic_matrices import SynapticMatrices
-
-TIME_STAMP_BYTES = BYTES_PER_WORD
-
-# TODO: Make sure these values are correct (particularly CPU cycles)
-_SYNAPSES_BASE_DTCM_USAGE_IN_BYTES = 7 * BYTES_PER_WORD
-
-# 1 for drop late packets.
-_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES = 1 * BYTES_PER_WORD
-_SYNAPSES_BASE_N_CPU_CYCLES_PER_NEURON = 10
-_SYNAPSES_BASE_N_CPU_CYCLES = 8
-
-
-class SynapticManager(object):
- """ Deals with synapses
- """
- # pylint: disable=too-many-arguments, too-many-locals
- __slots__ = [
- # The number of synapse types
- "__n_synapse_types",
- # The maximum size of the direct or single synaptic matrix
- "__all_single_syn_sz",
- # The number of sigmas to use when calculating the ring buffer upper
- # bound
- "__ring_buffer_sigma",
- # The spikes-per-second to use for an incoming population that doesn't
- # specify this
- "__spikes_per_second",
- # The dynamics used by the synapses e.g. STDP, static etc.
- "__synapse_dynamics",
- # The reader and writer of synapses to and from SpiNNaker
- "__synapse_io",
- # A list of scale factors for the weights for each synapse type
- "__weight_scales",
- # A list of ring buffer shift values corresponding to the weight
- # scales; a left shift by this amount will do the multiplication by
- # the weight scale
- "__ring_buffer_shifts",
- # The actual synaptic matrix handling code, split for simplicity
- "__synaptic_matrices",
- # Determine whether spikes should be dropped if they arrive after the
- # end of a timestep
- "__drop_late_spikes",
- # Overridable (for testing only) region IDs
- "_synapse_params_region",
- "_pop_table_region",
- "_synaptic_matrix_region",
- "_synapse_dynamics_region",
- "_struct_dynamics_region",
- "_connector_builder_region",
- "_direct_matrix_region"]
-
- # TODO make this right
- FUDGE = 0
-
- # 1. address of direct addresses, 2. size of direct addresses matrix size
- STATIC_SYNAPSE_MATRIX_SDRAM_IN_BYTES = 2 * BYTES_PER_WORD
-
- NOT_EXACT_SLICES_ERROR_MESSAGE = (
- "The splitter {} is returning estimated slices during DSG. "
- "This is deemed an error. Please fix and try again")
-
- TOO_MUCH_WRITTEN_SYNAPTIC_DATA = (
- "Too much synaptic memory has been written: {} of {} ")
-
- INDEXS_DONT_MATCH_ERROR_MESSAGE = (
- "Delay index {} and normal index {} do not match")
-
- NO_DELAY_EDGE_FOR_SRC_IDS_MESSAGE = (
- "Found delayed source IDs but no delay machine edge for {}")
-
- def __init__(self, n_synapse_types, ring_buffer_sigma, spikes_per_second,
- drop_late_spikes):
- """
- :param int n_synapse_types:
- number of synapse types on a neuron (e.g., 2 for excitatory and
- inhibitory)
- :param ring_buffer_sigma:
- How many SD above the mean to go for upper bound; a
- good starting choice is 5.0. Given length of simulation we can
- set this for approximate number of saturation events.
- :type ring_buffer_sigma: float or None
- :param spikes_per_second: Estimated spikes per second
- :type spikes_per_second: float or None
- :param bool drop_late_spikes: control flag for dropping late packets.
- """
- self.__n_synapse_types = n_synapse_types
- self.__ring_buffer_sigma = ring_buffer_sigma
- self.__spikes_per_second = spikes_per_second
- self.__drop_late_spikes = drop_late_spikes
- self._synapse_params_region = \
- POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value
- self._pop_table_region = \
- POPULATION_BASED_REGIONS.POPULATION_TABLE.value
- self._synaptic_matrix_region = \
- POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value
- self._synapse_dynamics_region = \
- POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value
- self._struct_dynamics_region = \
- POPULATION_BASED_REGIONS.STRUCTURAL_DYNAMICS.value
- self._connector_builder_region = \
- POPULATION_BASED_REGIONS.CONNECTOR_BUILDER.value
- self._direct_matrix_region = \
- POPULATION_BASED_REGIONS.DIRECT_MATRIX.value
-
- # Create the synapse IO
- self.__synapse_io = SynapseIORowBased()
-
- if self.__ring_buffer_sigma is None:
- self.__ring_buffer_sigma = get_config_float(
- "Simulation", "ring_buffer_sigma")
-
- if self.__spikes_per_second is None:
- self.__spikes_per_second = get_config_float(
- "Simulation", "spikes_per_second")
-
- if self.__drop_late_spikes is None:
- self.__drop_late_spikes = get_config_bool(
- "Simulation", "drop_late_spikes")
-
- # Prepare for dealing with STDP - there can only be one (non-static)
- # synapse dynamics per vertex at present
- self.__synapse_dynamics = None
-
- # Keep the details once computed to allow reading back
- self.__weight_scales = None
- self.__ring_buffer_shifts = None
-
- # Limit the DTCM used by one-to-one connections
- self.__all_single_syn_sz = get_config_int(
- "Simulation", "one_to_one_connection_dtcm_max_bytes")
-
- # Post vertex slice to synaptic matrices
- self.__synaptic_matrices = dict()
-
- def __get_synaptic_matrices(self, post_vertex_slice):
- """ Get the synaptic matrices for a given slice of the vertex
-
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- the slice of the vertex to get the matrices for
- :rtype: SynapticMatrices
- """
- # Use the cached version if possible
- if post_vertex_slice in self.__synaptic_matrices:
- return self.__synaptic_matrices[post_vertex_slice]
-
- # Otherwise generate new ones
- matrices = SynapticMatrices(
- post_vertex_slice, self.__n_synapse_types,
- self.__all_single_syn_sz, self.__synapse_io,
- self._synaptic_matrix_region, self._direct_matrix_region,
- self._pop_table_region)
- self.__synaptic_matrices[post_vertex_slice] = matrices
- return matrices
-
- def host_written_matrix_size(self, post_vertex_slice):
- """ The size of the matrix written by the host for a given\
- machine vertex
-
- :param post_vertex_slice: The slice of the vertex to get the size of
- :rtype: int
- """
- matrices = self.__get_synaptic_matrices(post_vertex_slice)
- return matrices.host_generated_block_addr
-
- def on_chip_written_matrix_size(self, post_vertex_slice):
- """ The size of the matrix that will be written on the machine for a\
- given machine vertex
-
- :param post_vertex_slice: The slice of the vertex to get the size of
- :rtype: int
- """
- matrices = self.__get_synaptic_matrices(post_vertex_slice)
- return (matrices.on_chip_generated_block_addr -
- matrices.host_generated_block_addr)
-
- @property
- def synapse_dynamics(self):
- """ The synapse dynamics used by the synapses e.g. plastic or static.\
- Settable.
-
- :rtype: AbstractSynapseDynamics or None
- """
- return self.__synapse_dynamics
-
- @property
- def drop_late_spikes(self):
- return self.__drop_late_spikes
-
- @synapse_dynamics.setter
- def synapse_dynamics(self, synapse_dynamics):
- """ Set the synapse dynamics. Note that after setting, the dynamics\
- might not be the type set as it can be combined with the existing\
- dynamics in exciting ways.
- """
- if self.__synapse_dynamics is None:
- self.__synapse_dynamics = synapse_dynamics
- else:
- self.__synapse_dynamics = self.__synapse_dynamics.merge(
- synapse_dynamics)
-
- @property
- def ring_buffer_sigma(self):
- """ The sigma in the estimation of the maximum summed ring buffer\
- weights. Settable.
-
- :rtype: float
- """
- return self.__ring_buffer_sigma
-
- @ring_buffer_sigma.setter
- def ring_buffer_sigma(self, ring_buffer_sigma):
- self.__ring_buffer_sigma = ring_buffer_sigma
-
- @property
- def spikes_per_second(self):
- """ The assumed maximum spikes per second of an incoming population.\
- Used when calculating the ring buffer weight scaling. Settable.
-
- :rtype: float
- """
- return self.__spikes_per_second
-
- @spikes_per_second.setter
- def spikes_per_second(self, spikes_per_second):
- self.__spikes_per_second = spikes_per_second
-
- @property
- def vertex_executable_suffix(self):
- """ The suffix of the executable name due to the type of synapses \
- in use.
-
- :rtype: str
- """
- if self.__synapse_dynamics is None:
- return ""
- return self.__synapse_dynamics.get_vertex_executable_suffix()
-
- def get_n_cpu_cycles(self):
- """
- :rtype: int
- """
- # TODO: Calculate this correctly
- return self.FUDGE
-
- def get_dtcm_usage_in_bytes(self):
- """
- :rtype: int
- """
- # TODO: Calculate this correctly
- return self.FUDGE
-
- def _get_synapse_params_size(self):
- """
- :rtype: int
- """
- return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES +
- (BYTES_PER_WORD * self.__n_synapse_types))
-
- def _get_synapse_dynamics_parameter_size(
- self, n_atoms, app_graph, app_vertex):
- """ Get the size of the synapse dynamics region
-
- :param int n_atoms: The number of atoms on the core
- :param ~.ApplicationGraph app_graph: The application graph
- :param ~.ApplicationVertex app_vertex: The application vertex
- :rtype: int
- """
- if self.__synapse_dynamics is None:
- return 0
-
- # Does the size of the parameters area depend on presynaptic
- # connections in any way?
- if isinstance(self.__synapse_dynamics,
- AbstractSynapseDynamicsStructural):
- return self.__synapse_dynamics\
- .get_structural_parameters_sdram_usage_in_bytes(
- app_graph, app_vertex, n_atoms)
- else:
- return self.__synapse_dynamics.get_parameters_sdram_usage_in_bytes(
- n_atoms, self.__n_synapse_types)
-
- def get_sdram_usage_in_bytes(
- self, post_vertex_slice, application_graph, app_vertex):
- """ Get the SDRAM usage of a slice of atoms of this vertex
-
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The slice of atoms to get the size of
- :param ~pacman.model.graphs.application.ApplicationGraph \
- application_graph: The application graph
- :param AbstractPopulationVertex app_vertex: The application vertex
- :rtype: int
- """
- in_edges = application_graph.get_edges_ending_at_vertex(app_vertex)
- matrices = self.__get_synaptic_matrices(post_vertex_slice)
- return (
- self._get_synapse_params_size() +
- self._get_synapse_dynamics_parameter_size(
- post_vertex_slice.n_atoms, application_graph, app_vertex) +
- matrices.size(in_edges))
-
- def _reserve_memory_regions(
- self, spec, vertex_slice, all_syn_block_sz, machine_graph,
- machine_vertex):
- """ Reserve memory regions for a core
-
- :param ~.DataSpecificationGenerator spec: The data spec to reserve in
- :param ~pacman.model.graphs.common.Slice vertex_slice:
- The slice of the vertex to allocate for
- :param int all_syn_block_sz: The memory to reserve for synapses
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- The machine graph
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- The machine vertex
- """
- spec.reserve_memory_region(
- region=self._synapse_params_region,
- size=self._get_synapse_params_size(),
- label='SynapseParams')
-
- if all_syn_block_sz > 0:
- spec.reserve_memory_region(
- region=self._synaptic_matrix_region,
- size=all_syn_block_sz, label='SynBlocks')
-
- # return if not got a synapse dynamics
- if self.__synapse_dynamics is None:
- return
-
- synapse_dynamics_sz = \
- self.__synapse_dynamics.get_parameters_sdram_usage_in_bytes(
- vertex_slice.n_atoms, self.__n_synapse_types)
- if synapse_dynamics_sz != 0:
- spec.reserve_memory_region(
- region=self._synapse_dynamics_region,
- size=synapse_dynamics_sz, label='synapseDynamicsParams')
-
- # if structural, create structural region
- if isinstance(
- self.__synapse_dynamics, AbstractSynapseDynamicsStructural):
-
- synapse_structural_dynamics_sz = (
- self.__synapse_dynamics.
- get_structural_parameters_sdram_usage_in_bytes(
- machine_graph, machine_vertex, vertex_slice.n_atoms))
-
- if synapse_structural_dynamics_sz != 0:
- spec.reserve_memory_region(
- region=self._struct_dynamics_region,
- size=synapse_structural_dynamics_sz,
- label='synapseDynamicsStructuralParams')
-
- @staticmethod
- def _ring_buffer_expected_upper_bound(
- weight_mean, weight_std_dev, spikes_per_second, n_synapses_in,
- sigma):
- """ Provides expected upper bound on accumulated values in a ring\
- buffer element.
-
- Requires an assessment of maximum Poisson input rate.
-
- Assumes knowledge of mean and SD of weight distribution, fan-in
- and timestep.
-
- All arguments should be assumed real values except n_synapses_in
- which will be an integer.
-
- :param float weight_mean: Mean of weight distribution (in either nA or
- microSiemens as required)
- :param float weight_std_dev: SD of weight distribution
- :param float spikes_per_second: Maximum expected Poisson rate in Hz
- :param int n_synapses_in: No of connected synapses
- :param float sigma: How many SD above the mean to go for upper bound;
- a good starting choice is 5.0. Given length of simulation we can
- set this for approximate number of saturation events.
- :rtype: float
- """
- # E[ number of spikes ] in a timestep
- steps_per_second = (MICRO_TO_SECOND_CONVERSION /
- machine_time_step())
-
- average_spikes_per_timestep = (
- float(n_synapses_in * spikes_per_second) / steps_per_second)
-
- # Exact variance contribution from inherent Poisson variation
- poisson_variance = average_spikes_per_timestep * (weight_mean ** 2)
-
- # Upper end of range for Poisson summation required below
- # upper_bound needs to be an integer
- upper_bound = int(round(average_spikes_per_timestep +
- POSSION_SIGMA_SUMMATION_LIMIT *
- math.sqrt(average_spikes_per_timestep)))
-
- # Closed-form exact solution for summation that gives the variance
- # contributed by weight distribution variation when modulated by
- # Poisson PDF. Requires scipy.special for gamma and incomplete gamma
- # functions. Beware: incomplete gamma doesn't work the same as
- # Mathematica because (1) it's regularised and needs a further
- # multiplication and (2) it's actually the complement that is needed
- # i.e. 'gammaincc']
-
- weight_variance = 0.0
-
- if weight_std_dev > 0:
- # pylint: disable=no-member
- lngamma = special.gammaln(1 + upper_bound)
- gammai = special.gammaincc(
- 1 + upper_bound, average_spikes_per_timestep)
-
- big_ratio = (math.log(average_spikes_per_timestep) * upper_bound -
- lngamma)
-
- if -701.0 < big_ratio < 701.0 and big_ratio != 0.0:
- log_weight_variance = (
- -average_spikes_per_timestep +
- math.log(average_spikes_per_timestep) +
- 2.0 * math.log(weight_std_dev) +
- math.log(math.exp(average_spikes_per_timestep) * gammai -
- math.exp(big_ratio)))
- weight_variance = math.exp(log_weight_variance)
-
- # upper bound calculation -> mean + n * SD
- return ((average_spikes_per_timestep * weight_mean) +
- (sigma * math.sqrt(poisson_variance + weight_variance)))
-
- def _get_ring_buffer_to_input_left_shifts(
- self, machine_vertex, machine_graph, weight_scale):
- """ Get the scaling of the ring buffer to provide as much accuracy as\
- possible without too much overflow
-
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- :param float weight_scale:
- :rtype: list(int)
- """
- weight_scale_squared = weight_scale * weight_scale
- n_synapse_types = self.__n_synapse_types
- running_totals = [RunningStats() for _ in range(n_synapse_types)]
- delay_running_totals = [RunningStats() for _ in range(n_synapse_types)]
- total_weights = numpy.zeros(n_synapse_types)
- biggest_weight = numpy.zeros(n_synapse_types)
- weights_signed = False
- rate_stats = [RunningStats() for _ in range(n_synapse_types)]
- steps_per_second = (
- MICRO_TO_SECOND_CONVERSION /
- machine_time_step())
-
- synapse_map = dict()
- for machine_edge in machine_graph.get_edges_ending_at_vertex(
- machine_vertex):
- if isinstance(machine_edge.app_edge, ProjectionApplicationEdge):
- for synapse_info in machine_edge.app_edge.synapse_information:
- # Per synapse info we need any one of the edges
- synapse_map[synapse_info] = machine_edge
-
- for synapse_info in synapse_map:
- synapse_type = synapse_info.synapse_type
- synapse_dynamics = synapse_info.synapse_dynamics
- connector = synapse_info.connector
-
- weight_mean = (
- synapse_dynamics.get_weight_mean(
- connector, synapse_info) * weight_scale)
- n_connections = \
- connector.get_n_connections_to_post_vertex_maximum(
- synapse_info)
- weight_variance = synapse_dynamics.get_weight_variance(
- connector, synapse_info.weights,
- synapse_info) * weight_scale_squared
- running_totals[synapse_type].add_items(
- weight_mean, weight_variance, n_connections)
-
- delay_variance = synapse_dynamics.get_delay_variance(
- connector, synapse_info.delays, synapse_info)
- delay_running_totals[synapse_type].add_items(
- 0.0, delay_variance, n_connections)
-
- weight_max = (synapse_dynamics.get_weight_maximum(
- connector, synapse_info) * weight_scale)
- biggest_weight[synapse_type] = max(
- biggest_weight[synapse_type], weight_max)
-
- spikes_per_tick = max(
- 1.0, self.__spikes_per_second / steps_per_second)
- spikes_per_second = self.__spikes_per_second
- pre_vertex = synapse_map[synapse_info].pre_vertex
- if isinstance(pre_vertex, AbstractMaxSpikes):
- rate = pre_vertex.max_spikes_per_second()
- if rate != 0:
- spikes_per_second = rate
- spikes_per_tick = \
- pre_vertex.max_spikes_per_ts()
- rate_stats[synapse_type].add_items(
- spikes_per_second, 0, n_connections)
- total_weights[synapse_type] += spikes_per_tick * (
- weight_max * n_connections)
-
- if synapse_dynamics.are_weights_signed():
- weights_signed = True
-
- max_weights = numpy.zeros(n_synapse_types)
- for synapse_type in range(n_synapse_types):
- if delay_running_totals[synapse_type].variance == 0.0:
- max_weights[synapse_type] = max(total_weights[synapse_type],
- biggest_weight[synapse_type])
- else:
- stats = running_totals[synapse_type]
- rates = rate_stats[synapse_type]
- max_weights[synapse_type] = min(
- self._ring_buffer_expected_upper_bound(
- stats.mean, stats.standard_deviation, rates.mean,
- stats.n_items, self.__ring_buffer_sigma),
- total_weights[synapse_type])
- max_weights[synapse_type] = max(
- max_weights[synapse_type], biggest_weight[synapse_type])
-
- # Convert these to powers; we could use int.bit_length() for this if
- # they were integers, but they aren't...
- max_weight_powers = (
- 0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2))))
- for w in max_weights)
-
- # If 2^max_weight_power equals the max weight, we have to add another
- # power, as range is 0 - (just under 2^max_weight_power)!
- max_weight_powers = (
- w + 1 if (2 ** w) <= a else w
- for w, a in zip(max_weight_powers, max_weights))
-
- # If we have synapse dynamics that uses signed weights,
- # Add another bit of shift to prevent overflows
- if weights_signed:
- max_weight_powers = (m + 1 for m in max_weight_powers)
-
- return list(max_weight_powers)
-
- @staticmethod
- def __get_weight_scale(ring_buffer_to_input_left_shift):
- """ Return the amount to scale the weights by to convert them from \
- floating point values to 16-bit fixed point numbers which can be \
- shifted left by ring_buffer_to_input_left_shift to produce an\
- s1615 fixed point number
-
- :param int ring_buffer_to_input_left_shift:
- :rtype: float
- """
- return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1)))
-
- def __update_ring_buffer_shifts_and_weight_scales(
- self, machine_vertex, machine_graph, weight_scale):
- """ Update the ring buffer shifts and weight scales for this vertex
-
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- :param float weight_scale:
- """
- if self.__ring_buffer_shifts is None:
- self.__ring_buffer_shifts = \
- self._get_ring_buffer_to_input_left_shifts(
- machine_vertex, machine_graph, weight_scale)
- self.__weight_scales = numpy.array([
- self.__get_weight_scale(r) * weight_scale
- for r in self.__ring_buffer_shifts])
-
- def write_data_spec(
- self, spec, application_vertex, post_vertex_slice, machine_vertex,
- machine_graph, application_graph, routing_info, weight_scale):
- """
- :param ~data_specification.DataSpecificationGenerator spec:
- The data specification to write to
- :param AbstractPopulationVertex application_vertex:
- The vertex owning the synapses
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The part of the vertex we're dealing with
- :param PopulationMachineVertex machine_vertex: The machine vertex
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- The graph containing the machine vertex
- :param ~pacman.model.graphs.application.ApplicationGraph \
- application_graph:
- The graph containing the application vertex
- :param ~pacman.model.routing_info.RoutingInfo routing_info:
- How messages are routed
- :param float weight_scale: How to scale the weights of the synapses
- """
- # Reserve the memory
- in_edges = application_graph.get_edges_ending_at_vertex(
- application_vertex)
- matrices = self.__get_synaptic_matrices(post_vertex_slice)
- all_syn_block_sz = matrices.synapses_size(in_edges)
- self._reserve_memory_regions(
- spec, post_vertex_slice, all_syn_block_sz, machine_graph,
- machine_vertex)
-
- self.__update_ring_buffer_shifts_and_weight_scales(
- machine_vertex, machine_graph, weight_scale)
- spec.switch_write_focus(self._synapse_params_region)
- # write the bool for deleting packets that were too late for a timer
- spec.write_value(int(self.__drop_late_spikes))
- # Write the ring buffer shifts
- spec.write_array(self.__ring_buffer_shifts)
-
- gen_data = matrices.write_synaptic_matrix_and_master_population_table(
- spec, machine_vertex, all_syn_block_sz, self.__weight_scales,
- routing_info, machine_graph)
-
- if self.__synapse_dynamics is not None:
- self.__synapse_dynamics.write_parameters(
- spec, self._synapse_dynamics_region,
- self.__weight_scales)
-
- if isinstance(self.__synapse_dynamics,
- AbstractSynapseDynamicsStructural):
- self.__synapse_dynamics.write_structural_parameters(
- spec, self._struct_dynamics_region, self.__weight_scales,
- machine_graph, machine_vertex, routing_info, matrices)
-
- self._write_on_machine_data_spec(spec, post_vertex_slice, gen_data)
-
- def _write_on_machine_data_spec(
- self, spec, post_vertex_slice, generator_data):
- """ Write the data spec for the synapse expander
-
- :param ~.DataSpecificationGenerator spec:
- The specification to write to
- :param ~pacman.model.common.Slice post_vertex_slice:
- The slice of the vertex being written
- :param list(GeneratorData) generator_data:
- """
- if not generator_data:
- return
-
- n_bytes = (
- SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES +
- (self.__n_synapse_types * DataType.U3232.size))
- for data in generator_data:
- n_bytes += data.size
-
- spec.reserve_memory_region(
- region=self._connector_builder_region,
- size=n_bytes, label="ConnectorBuilderRegion")
- spec.switch_write_focus(self._connector_builder_region)
-
- spec.write_value(len(generator_data))
- spec.write_value(post_vertex_slice.lo_atom)
- spec.write_value(post_vertex_slice.n_atoms)
- spec.write_value(self.__n_synapse_types)
- spec.write_value(get_n_bits(self.__n_synapse_types))
- n_neuron_id_bits = get_n_bits(post_vertex_slice.n_atoms)
- spec.write_value(n_neuron_id_bits)
- for w in self.__weight_scales:
- # if the weights are high enough and the population size large
- # enough, then weight_scales < 1 will result in a zero scale
- # if converted to an int, so we use U3232 here instead (as there
- # can be scales larger than U1616.max in conductance-based models)
- dtype = DataType.U3232
- spec.write_value(data=min(w, dtype.max), data_type=dtype)
-
- for data in generator_data:
- spec.write_array(data.gen_data)
-
- def get_connections_from_machine(
- self, transceiver, placements, app_edge, synapse_info):
- """ Read the connections from the machine for a given projection
-
- :param ~spinnman.transciever.Transceiver transceiver:
- Used to read the data from the machine
- :param ~pacman.model.placements.Placements placements:
- Where the vertices are on the machine
- :param ProjectionApplicationEdge app_edge:
- The application edge of the projection
- :param SynapseInformation synapse_info:
- The synapse information of the projection
- :return: The connections from the machine, with dtype
- AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE
- :rtype: ~numpy.ndarray
- """
-
- post_vertices = app_edge.post_vertex.machine_vertices
-
- # Start with something in the list so that concatenate works
- connections = [numpy.zeros(
- 0, dtype=AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE)]
- progress = ProgressBar(
- len(post_vertices),
- "Getting synaptic data between {} and {}".format(
- app_edge.pre_vertex.label, app_edge.post_vertex.label))
- for post_vertex in progress.over(post_vertices):
- post_slice = post_vertex.vertex_slice
- placement = placements.get_placement_of_vertex(post_vertex)
- matrix = self.__get_synaptic_matrices(post_slice)
- connections.extend(matrix.get_connections_from_machine(
- transceiver, placement, app_edge, synapse_info))
- return numpy.concatenate(connections)
-
- def gen_on_machine(self, post_vertex_slice):
- """ True if the synapses should be generated on the machine
-
- :param ~pacman.model.graphs.common.Slice post_vertex_slice:
- The slice of the vertex to determine the generation status of
- :rtype: bool
- """
- matrices = self.__get_synaptic_matrices(post_vertex_slice)
- return matrices.gen_on_machine
-
- def reset_ring_buffer_shifts(self):
- """ Reset the ring buffer shifts; needed if projection data changes
- between runs
- """
- self.__ring_buffer_shifts = None
- self.__weight_scales = None
-
- def clear_connection_cache(self):
- """ Flush the cache of connection information; needed for a second run
- """
- for matrices in self.__synaptic_matrices.values():
- matrices.clear_connection_cache()
-
- @property
- def changes_during_run(self):
- """ Whether the synapses being managed change during running.
-
- :rtype: bool
- """
- if self.__synapse_dynamics is None:
- return False
- return self.__synapse_dynamics.changes_during_run
-
- def read_generated_connection_holders(self, transceiver, placement):
- """ Fill in any pre-run connection holders for data which is generated
- on the machine, after it has been generated
-
- :param ~spinnman.transceiver.Transceiver transceiver:
- How to read the data from the machine
- :param ~pacman.model.placements.Placement placement:
- where the data is to be read from
- """
- matrices = self.__get_synaptic_matrices(placement.vertex.vertex_slice)
- matrices.read_generated_connection_holders(transceiver, placement)
-
- def clear_all_caches(self):
- """ Clears all cached data in the case that a reset requires remapping
- which might change things
- """
- # Clear the local caches
- self.clear_connection_cache()
- self.reset_ring_buffer_shifts()
-
- # We can simply reset this dict to reset everything downstream
- self.__synaptic_matrices = dict()
diff --git a/spynnaker/pyNN/models/neuron/synaptic_matrices.py b/spynnaker/pyNN/models/neuron/synaptic_matrices.py
index 91b5f32aef..9bda2c7512 100644
--- a/spynnaker/pyNN/models/neuron/synaptic_matrices.py
+++ b/spynnaker/pyNN/models/neuron/synaptic_matrices.py
@@ -19,22 +19,23 @@
from spinn_utilities.ordered_set import OrderedSet
from pacman.model.routing_info import BaseKeyAndMask
+from data_specification.enums.data_type import DataType
+
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
-from spynnaker.pyNN.models.neural_projections import (
- ProjectionApplicationEdge, DelayedApplicationEdge)
from spynnaker.pyNN.models.neuron.master_pop_table import (
MasterPopTableAsBinarySearch)
+from spynnaker.pyNN.utilities.utility_calls import get_n_bits
from .key_space_tracker import KeySpaceTracker
from .synaptic_matrix_app import SynapticMatrixApp
-
+# 1 for synaptic matrix region
# 1 for n_edges
# 2 for post_vertex_slice.lo_atom, post_vertex_slice.n_atoms
# 1 for n_synapse_types
# 1 for n_synapse_type_bits
# 1 for n_synapse_index_bits
SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES = (
- 1 + 2 + 1 + 1 + 1) * BYTES_PER_WORD
+ 1 + 1 + 2 + 1 + 1 + 1) * BYTES_PER_WORD
DIRECT_MATRIX_HEADER_COST_BYTES = 1 * BYTES_PER_WORD
@@ -50,14 +51,14 @@ class SynapticMatrices(object):
"__n_synapse_types",
# The maximum summed size of the "direct" or "single" matrices
"__all_single_syn_sz",
- # The synapse reader and writer to convert between SpiNNaker and host
- "__synapse_io",
# The ID of the synaptic matrix region
"__synaptic_matrix_region",
# The ID of the "direct" or "single" matrix region
"__direct_matrix_region",
# The ID of the master population table region
"__poptable_region",
+ # The ID of the connection builder region
+ "__connection_builder_region",
# The master population table data structure
"__poptable",
# The sub-matrices for each incoming edge
@@ -69,34 +70,65 @@ class SynapticMatrices(object):
# generated matrix will be written
"__on_chip_generated_block_addr",
# Determine if any of the matrices can be generated on the machine
- "__gen_on_machine"
+ "__gen_on_machine",
+ # Reference to give the synaptic matrix
+ "__synaptic_matrix_ref",
+ # Reference to give the direct matrix
+ "__direct_matrix_ref",
+ # Reference to give the master population table
+ "__poptable_ref",
+ # Reference to give the connection builder
+ "__connection_builder_ref"
]
def __init__(
self, post_vertex_slice, n_synapse_types, all_single_syn_sz,
- synapse_io, synaptic_matrix_region, direct_matrix_region,
- poptable_region):
+ synaptic_matrix_region, direct_matrix_region, poptable_region,
+ connection_builder_region, synaptic_matrix_ref=None,
+ direct_matrix_ref=None, poptable_ref=None,
+ connection_builder_ref=None):
"""
:param ~pacman.model.graphs.common.Slice post_vertex_slice:
The slice of the post vertex that these matrices are for
:param int n_synapse_types: The number of synapse types available
:param int all_single_syn_sz:
The space available for "direct" or "single" synapses
- :param SynapseIORowBased synapse_io: How to read and write synapses
:param int synaptic_matrix_region:
The region where synaptic matrices are stored
:param int direct_matrix_region:
The region where "direct" or "single" synapses are stored
:param int poptable_region:
The region where the population table is stored
+ :param int connection_builder_region:
+ The region where the synapse generator information is stored
+ :param synaptic_matrix_ref:
+ The reference to the synaptic matrix region, or None if not
+ referenceable
+ :type synaptic_matrix_ref: int or None
+ :param direct_matrix_ref:
+ The reference to the direct matrix region, or None if not
+ referenceable
+ :type direct_matrix_ref: int or None
+ :param poptable_ref:
+ The reference to the pop table region, or None if not
+ referenceable
+ :type poptable_ref: int or None
+ :param connection_builder_ref:
+ The reference to the connection builder region, or None if not
+ referenceable
+ :type connection_builder_ref: int or None
"""
self.__post_vertex_slice = post_vertex_slice
self.__n_synapse_types = n_synapse_types
self.__all_single_syn_sz = all_single_syn_sz
- self.__synapse_io = synapse_io
self.__synaptic_matrix_region = synaptic_matrix_region
self.__direct_matrix_region = direct_matrix_region
self.__poptable_region = poptable_region
+ self.__connection_builder_region = connection_builder_region
+ self.__synaptic_matrix_ref = synaptic_matrix_ref
+ self.__direct_matrix_ref = direct_matrix_ref
+ self.__poptable_ref = poptable_ref
+ self.__connection_builder_ref = connection_builder_ref
# Set up the master population table
self.__poptable = MasterPopTableAsBinarySearch()
@@ -114,16 +146,24 @@ def __init__(
@property
def host_generated_block_addr(self):
""" The address within the synaptic region after the last block
- written by the on-host synaptic generation
+ written by the on-host synaptic generation i.e. the start of
+ the space that can be overwritten provided the synapse expander
+ is run again
+
+ :rtype: int
"""
return self.__host_generated_block_addr
@property
- def on_chip_generated_block_addr(self):
- """ The address within the synaptic region after the last block
- reserved for the on-machine synaptic generation
+ def on_chip_generated_matrix_size(self):
+ """ The size of the space used by the generated matrix i.e. the
+ space that can be overwritten provided the synapse expander
+ is run again
+
+ :rtype: int
"""
- return self.__on_chip_generated_block_addr
+ return (self.__on_chip_generated_block_addr -
+ self.__host_generated_block_addr)
def __app_matrix(self, app_edge, synapse_info):
""" Get or create an application synaptic matrix object
@@ -139,81 +179,22 @@ def __app_matrix(self, app_edge, synapse_info):
return self.__matrices[key]
matrix = SynapticMatrixApp(
- self.__synapse_io, self.__poptable, synapse_info, app_edge,
+ self.__poptable, synapse_info, app_edge,
self.__n_synapse_types, self.__all_single_syn_sz,
self.__post_vertex_slice, self.__synaptic_matrix_region,
self.__direct_matrix_region)
self.__matrices[key] = matrix
return matrix
- def synapses_size(self, app_edges):
- """ The size of the synaptic blocks in bytes
-
- :param iterable(~pacman.model.graphs.application.ApplicationEdge) \
- app_edges:
- The incoming application edges
- :rtype: int
- """
- # Base size requirements
- # 1 word for address of direct addresses, and
- # 1 word for the size of the direct addresses matrix in bytes
- memory_size = 2 * BYTES_PER_WORD
- for in_edge in app_edges:
- if isinstance(in_edge, ProjectionApplicationEdge):
- for synapse_info in in_edge.synapse_information:
- matrix = self.__app_matrix(in_edge, synapse_info)
- memory_size = matrix.add_matrix_size(memory_size)
- memory_size = matrix.add_delayed_matrix_size(memory_size)
- return memory_size
-
- def size(self, app_edges):
- """ The size required by all parts of the matrices
-
- :param iterable(~pacman.model.graphs.application.ApplicationEdge) \
- app_edges:
- The incoming application edges
- :rtype: int
- """
- return (
- self.synapses_size(app_edges) +
- self.__gen_info_size(app_edges) + DIRECT_MATRIX_HEADER_COST_BYTES +
- self.__poptable.get_master_population_table_size(app_edges))
-
- def __gen_info_size(self, app_edges):
- """ The size in bytes of the synaptic expander parameters
-
- :param iterable(~pacman.model.graphs.application.ApplicationEdge) \
- app_edges:
- The incoming application edges
- :rtype: int
- """
- gen_on_machine = False
- size = 0
- for app_edge in app_edges:
- if not isinstance(app_edge, ProjectionApplicationEdge):
- continue
- for synapse_info in app_edge.synapse_information:
- matrix = self.__app_matrix(app_edge, synapse_info)
- m_size = matrix.generator_info_size
- if m_size > 0:
- gen_on_machine = True
- size += m_size
-
- if gen_on_machine:
- size += SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES
- size += self.__n_synapse_types * BYTES_PER_WORD
- return size
-
- def write_synaptic_matrix_and_master_population_table(
- self, spec, machine_vertex, all_syn_block_sz, weight_scales,
- routing_info, machine_graph):
- """ Simultaneously generates both the master population table and
- the synaptic matrix.
+ def write_synaptic_data(
+ self, spec, incoming_projections, all_syn_block_sz, weight_scales,
+ routing_info):
+ """ Write the synaptic data for all incoming projections
:param ~data_specification.DataSpecificationGenerator spec:
The spec to write to
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- The machine vertex to write for
+ :param list(~spynnaker8.models.Projection) incoming_projection:
+ The projections to generate data for
:param int all_syn_block_sz:
The size in bytes of the space reserved for synapses
:param list(float) weight_scales: The weight scale of each synapse
@@ -221,21 +202,26 @@ def write_synaptic_matrix_and_master_population_table(
The routing information for all edges
:param ~pacman.model.graphs.machine.MachineGraph machine_graph:
The machine graph
- :return: A list of generator data to be written elsewhere
- :rtype: list(GeneratorData)
"""
+ # If there are no synapses, there is nothing to do!
+ if all_syn_block_sz == 0:
+ return
+
+ # Reserve the region
spec.comment(
"\nWriting Synaptic Matrix and Master Population Table:\n")
+ spec.reserve_memory_region(
+ region=self.__synaptic_matrix_region,
+ size=all_syn_block_sz, label='SynBlocks',
+ reference=self.__synaptic_matrix_ref)
# Track writes inside the synaptic matrix region:
block_addr = 0
self.__poptable.initialise_table()
- # Get the application projection edges incoming to this machine vertex
- in_machine_edges = machine_graph.get_edges_ending_at_vertex(
- machine_vertex)
+ # Convert the data for convenience
in_edges_by_app_edge, key_space_tracker = self.__in_edges_by_app_edge(
- in_machine_edges, routing_info)
+ incoming_projections, routing_info)
# Set up for single synapses
# The list is seeded with an empty array so we can just concatenate
@@ -286,7 +272,7 @@ def write_synaptic_matrix_and_master_population_table(
# Finish the master population table
self.__poptable.finish_master_pop_table(
- spec, self.__poptable_region)
+ spec, self.__poptable_region, self.__poptable_ref)
# Write the size and data of single synapses to the direct region
single_data = numpy.concatenate(single_synapses)
@@ -296,39 +282,111 @@ def write_synaptic_matrix_and_master_population_table(
size=(
single_data_words * BYTES_PER_WORD +
DIRECT_MATRIX_HEADER_COST_BYTES),
- label='DirectMatrix')
+ label='DirectMatrix',
+ reference=self.__direct_matrix_ref)
spec.switch_write_focus(self.__direct_matrix_region)
spec.write_value(single_data_words * BYTES_PER_WORD)
if single_data_words:
spec.write_array(single_data)
- return generator_data
+ self.__write_synapse_expander_data_spec(
+ spec, generator_data, weight_scales)
+
+ def __write_synapse_expander_data_spec(
+ self, spec, generator_data, weight_scales):
+ """ Write the data spec for the synapse expander
- def __in_edges_by_app_edge(self, in_machine_edges, routing_info):
- """ Convert a list of machine edges to a dict of
+ :param ~.DataSpecificationGenerator spec:
+ The specification to write to
+ :param list(GeneratorData) generator_data: The data to be written
+ :param weight_scales: scaling of weights on each synapse
+ :type weight_scales: list(int or float)
+ """
+ if not generator_data:
+ if self.__connection_builder_ref is not None:
+ # If there is a reference, we still need a region to create
+ spec.reserve_memory_region(
+ region=self.__connection_builder_region,
+ size=4, label="ConnectorBuilderRegion",
+ reference=self.__connection_builder_ref)
+ return
+
+ n_bytes = (
+ SYNAPSES_BASE_GENERATOR_SDRAM_USAGE_IN_BYTES +
+ (self.__n_synapse_types * DataType.U3232.size))
+ for data in generator_data:
+ n_bytes += data.size
+
+ spec.reserve_memory_region(
+ region=self.__connection_builder_region,
+ size=n_bytes, label="ConnectorBuilderRegion",
+ reference=self.__connection_builder_ref)
+ spec.switch_write_focus(self.__connection_builder_region)
+
+ spec.write_value(self.__synaptic_matrix_region)
+ spec.write_value(len(generator_data))
+ spec.write_value(self.__post_vertex_slice.lo_atom)
+ spec.write_value(self.__post_vertex_slice.n_atoms)
+ spec.write_value(self.__n_synapse_types)
+ spec.write_value(get_n_bits(self.__n_synapse_types))
+ n_neuron_id_bits = get_n_bits(self.__post_vertex_slice.n_atoms)
+ spec.write_value(n_neuron_id_bits)
+ for w in weight_scales:
+ # if the weights are high enough and the population size large
+ # enough, then weight_scales < 1 will result in a zero scale
+ # if converted to an int, so we use U3232 here instead (as there
+ # can be scales larger than U1616.max in conductance-based models)
+ dtype = DataType.U3232
+ spec.write_value(data=min(w, dtype.max), data_type=dtype)
+
+ items = list()
+ for data in generator_data:
+ items.extend(data.gen_data)
+ spec.write_array(numpy.concatenate(items))
+
+ def __in_edges_by_app_edge(self, incoming_projections, routing_info):
+ """ Convert a list of incoming projections to a dict of
application edge -> list of machine edges, and a key tracker
- :param list(~pacman.model.graphs.machine.MachineEdge) in_machine_edges:
- The incoming machine edges
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The incoming projections
:param RoutingInfo routing_info: Routing information for all edges
:rtype: tuple(dict, KeySpaceTracker)
"""
in_edges_by_app_edge = defaultdict(OrderedSet)
key_space_tracker = KeySpaceTracker()
- for machine_edge in in_machine_edges:
- rinfo = routing_info.get_routing_info_for_edge(machine_edge)
- key_space_tracker.allocate_keys(rinfo)
- app_edge = machine_edge.app_edge
- if isinstance(app_edge, ProjectionApplicationEdge):
- in_edges_by_app_edge[app_edge].add(machine_edge)
- elif isinstance(app_edge, DelayedApplicationEdge):
- # We need to make sure that if an undelayed edge is filtered
- # but a delayed one is not, we still pick it up
- undelayed_machine_edge = (
- machine_edge.app_edge.undelayed_edge.get_machine_edge(
- machine_edge.pre_vertex, machine_edge.post_vertex))
- in_edges_by_app_edge[app_edge.undelayed_edge].add(
- undelayed_machine_edge)
+ for proj in incoming_projections:
+ app_edge = proj._projection_edge
+
+ # Skip if already done
+ if app_edge in in_edges_by_app_edge:
+ continue
+
+ # Add all incoming machine edges for this slice
+ for machine_edge in app_edge.machine_edges:
+ if (machine_edge.post_vertex.vertex_slice ==
+ self.__post_vertex_slice):
+ rinfo = routing_info.get_routing_info_for_edge(
+ machine_edge)
+ key_space_tracker.allocate_keys(rinfo)
+ in_edges_by_app_edge[app_edge].add(machine_edge)
+
+ # Also go through the delay edges in case an undelayed edge
+ # was filtered
+ delay_edge = app_edge.delay_edge
+ if delay_edge is not None:
+ for machine_edge in delay_edge.machine_edges:
+ if (machine_edge.post_vertex.vertex_slice ==
+ self.__post_vertex_slice):
+ rinfo = routing_info.get_routing_info_for_edge(
+ machine_edge)
+ key_space_tracker.allocate_keys(rinfo)
+ undelayed_machine_edge = (
+ app_edge.get_machine_edge(
+ machine_edge.pre_vertex,
+ machine_edge.post_vertex))
+ in_edges_by_app_edge[app_edge].add(
+ undelayed_machine_edge)
return in_edges_by_app_edge, key_space_tracker
@staticmethod
diff --git a/spynnaker/pyNN/models/neuron/synaptic_matrix.py b/spynnaker/pyNN/models/neuron/synaptic_matrix.py
index d27c557305..e591af9e5a 100644
--- a/spynnaker/pyNN/models/neuron/synaptic_matrix.py
+++ b/spynnaker/pyNN/models/neuron/synaptic_matrix.py
@@ -16,12 +16,11 @@
import numpy
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
-
-from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsStatic
-from spynnaker.pyNN.models.neural_projections.connectors import (
- OneToOneConnector)
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ AbstractSynapseDynamicsStructural)
from .generator_data import GeneratorData, SYN_REGION_UNUSED
+from .synapse_io import get_synapses, convert_to_connections
class SynapticMatrix(object):
@@ -29,8 +28,6 @@ class SynapticMatrix(object):
"""
__slots__ = [
- # The reader and writer of synaptic matrices
- "__synapse_io",
# The master population table
"__poptable",
# The synapse info used to generate the matrices
@@ -51,6 +48,8 @@ class SynapticMatrix(object):
"__weight_scales",
# The maximum summed size of the synaptic matrices
"__all_syn_block_sz",
+ # True if the matrix could be direct with enough space
+ "__is_direct_capable",
# The maximum summed size of the "direct" or "single" matrices
"__all_single_syn_sz",
# The expected size of a synaptic matrix
@@ -75,12 +74,11 @@ class SynapticMatrix(object):
"__delay_received_block"
]
- def __init__(self, synapse_io, poptable, synapse_info, machine_edge,
+ def __init__(self, poptable, synapse_info, machine_edge,
app_edge, n_synapse_types, max_row_info, routing_info,
delay_routing_info, weight_scales, all_syn_block_sz,
- all_single_syn_sz):
+ all_single_syn_sz, is_direct_capable):
"""
- :param SynapseIORowBased synapse_io: The reader and writer of synapses
:param MasterPopTableAsBinarySearch poptable:
The master population table
:param SynapseInformation synapse_info:
@@ -101,8 +99,9 @@ def __init__(self, synapse_io, poptable, synapse_info, machine_edge,
The space available for all synaptic matrices
:param int all_single_syn_sz:
The space available for "direct" or "single" synapses
+ :param bool is_direct_capable:
+ True if this matrix can be direct if there is space
"""
- self.__synapse_io = synapse_io
self.__poptable = poptable
self.__synapse_info = synapse_info
self.__machine_edge = machine_edge
@@ -114,6 +113,7 @@ def __init__(self, synapse_io, poptable, synapse_info, machine_edge,
self.__weight_scales = weight_scales
self.__all_syn_block_sz = all_syn_block_sz
self.__all_single_syn_sz = all_single_syn_sz
+ self.__is_direct_capable = is_direct_capable
# The matrix size can be calculated up-front; use for checking later
self.__matrix_size = (
@@ -135,37 +135,17 @@ def __init__(self, synapse_io, poptable, synapse_info, machine_edge,
self.__received_block = None
self.__delay_received_block = None
- @property
- def is_delayed(self):
- """ Is there a delay matrix?
-
- :rtype: bool
- """
- return self.__app_edge.n_delay_stages > 0
-
- def is_direct(self, single_addr):
+ def __is_direct(self, single_addr):
""" Determine if the given connection can be done with a "direct"\
synaptic matrix - this must have an exactly 1 entry per row
:param int single_addr: The current offset of the direct matrix
- :return: A tuple of a boolean indicating if the matrix is direct and
- the next offset of the single matrix
- :rtype: (bool, int)
+ :rtype: bool
"""
- pre_vertex_slice = self.__machine_edge.pre_vertex.vertex_slice
- post_vertex_slice = self.__machine_edge.post_vertex.vertex_slice
+ if not self.__is_direct_capable:
+ return False
next_addr = single_addr + self.__single_matrix_size
- is_direct = (
- next_addr <= self.__all_single_syn_sz and
- not self.is_delayed and
- isinstance(self.__synapse_info.connector, OneToOneConnector) and
- isinstance(self.__synapse_info.synapse_dynamics,
- SynapseDynamicsStatic) and
- (pre_vertex_slice.lo_atom == post_vertex_slice.lo_atom) and
- (pre_vertex_slice.hi_atom == post_vertex_slice.hi_atom) and
- not self.__synapse_info.prepop_is_view and
- not self.__synapse_info.postpop_is_view)
- return is_direct, next_addr
+ return next_addr <= self.__all_single_syn_sz
def get_row_data(self):
""" Generate the row data for a synaptic matrix from the description
@@ -174,17 +154,34 @@ def get_row_data(self):
:rtype: tuple(~numpy.ndarray or None, ~numpy.ndarray or None)
"""
+ # Get the actual connections
+ pre_slices =\
+ self.__app_edge.pre_vertex.splitter.get_out_going_slices()[0]
+ post_slices =\
+ self.__app_edge.post_vertex.splitter.get_in_coming_slices()[0]
+ pre_vertex_slice = self.__machine_edge.pre_vertex.vertex_slice
+ post_vertex_slice = self.__machine_edge.post_vertex.vertex_slice
+ connections = self.__synapse_info.connector.create_synaptic_block(
+ pre_slices, post_slices, pre_vertex_slice, post_vertex_slice,
+ self.__synapse_info.synapse_type, self.__synapse_info)
+
# Get the row data; note that we use the availability of the routing
# keys to decide if we should actually generate any data; this is
# because a single edge might have been filtered
(row_data, delayed_row_data, delayed_source_ids,
- delay_stages) = self.__synapse_io.get_synapses(
- self.__synapse_info, self.__app_edge.n_delay_stages,
- self.__n_synapse_types, self.__weight_scales,
- self.__machine_edge, self.__max_row_info,
+ delay_stages) = get_synapses(
+ connections, self.__synapse_info, self.__app_edge.n_delay_stages,
+ self.__n_synapse_types, self.__weight_scales, self.__app_edge,
+ pre_vertex_slice, post_vertex_slice, self.__max_row_info,
self.__routing_info is not None,
- self.__delay_routing_info is not None,
- self.__app_edge)
+ self.__delay_routing_info is not None)
+
+ # Set connections for structural plasticity
+ if isinstance(self.__synapse_info.synapse_dynamics,
+ AbstractSynapseDynamicsStructural):
+ self.__synapse_info.synapse_dynamics.set_connections(
+ connections, post_vertex_slice, self.__app_edge,
+ self.__synapse_info, self.__machine_edge)
if self.__app_edge.delay_edge is not None:
pre_vertex_slice = self.__machine_edge.pre_vertex.vertex_slice
@@ -219,7 +216,7 @@ def write_machine_matrix(
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.undelayed_max_n_synapses == 0:
- self.__index = self.__poptable.add_invalid_entry(
+ self.__index = self.__poptable.add_invalid_machine_entry(
self.__routing_info.first_key_and_mask)
return block_addr, single_addr
@@ -228,8 +225,7 @@ def write_machine_matrix(
raise Exception("Data is incorrect size: {} instead of {}".format(
size, self.__matrix_size))
- is_direct, _ = self.is_direct(single_addr)
- if is_direct:
+ if self.__is_direct(single_addr):
single_addr = self.__write_single_machine_matrix(
single_synapses, single_addr, row_data)
return block_addr, single_addr
@@ -260,7 +256,7 @@ def write_delayed_machine_matrix(self, spec, block_addr, row_data):
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.delayed_max_n_synapses == 0:
- self.__delay_index = self.__poptable.add_invalid_entry(
+ self.__delay_index = self.__poptable.add_invalid_machine_entry(
self.__delay_routing_info.first_key_and_mask)
return block_addr
@@ -303,50 +299,8 @@ def __write_single_machine_matrix(
single_addr = single_addr + self.__single_matrix_size
return single_addr
- def next_app_on_chip_address(self, app_block_addr, max_app_addr):
- """ Allocate a machine-level address of a matrix from within an\
- app-level allocation
-
- :param int app_block_addr:
- The current position in the application block
- :param int max_app_addr:
- The position of the end of the allocation
- :return: The address after the allocation and the allocated address
- :rtype: int, int
- """
- if self.__max_row_info.undelayed_max_n_synapses == 0:
- return app_block_addr, SYN_REGION_UNUSED
-
- # Note: No master population table padding is needed here because
- # the allocation is at the application level
- addr = app_block_addr
- app_block_addr = self.__next_addr(
- app_block_addr, self.__matrix_size, max_app_addr)
- return app_block_addr, addr
-
- def next_app_delay_on_chip_address(self, app_block_addr, max_app_addr):
- """ Allocate a machine-level address of a delayed matrix from within\
- an app-level allocation
-
- :param int app_block_addr:
- The current position in the application block
- :param int max_app_addr:
- The position of the end of the allocation
- :return: The address after the allocation and the allocated address
- :rtype: int, int
- """
- if self.__max_row_info.delayed_max_n_synapses == 0:
- return app_block_addr, SYN_REGION_UNUSED
-
- # Note: No master population table padding is needed here because
- # the allocation is at the application level
- addr = app_block_addr
- app_block_addr = self.__next_addr(
- app_block_addr, self.__delay_matrix_size, max_app_addr)
- return app_block_addr, addr
-
def next_on_chip_address(self, block_addr):
- """ Allocate an address for a machine matrix and add it to the\
+ """ Allocate an address for a machine matrix and add it to the
population table
:param int block_addr:
@@ -360,7 +314,7 @@ def next_on_chip_address(self, block_addr):
# If we have routing info but no synapses, add an invalid entry
if self.__max_row_info.undelayed_max_n_synapses == 0:
- self.__index = self.__poptable.add_invalid_entry(
+ self.__index = self.__poptable.add_invalid_machine_entry(
self.__routing_info.first_key_and_mask)
return block_addr, SYN_REGION_UNUSED
@@ -375,8 +329,8 @@ def next_on_chip_address(self, block_addr):
return block_addr, self.__syn_mat_offset
def next_delay_on_chip_address(self, block_addr):
- """ Allocate an address for a delayed machine matrix and add it to \
- the population table
+ """ Allocate an address for a delayed machine matrix and add it to the
+ population table
:param int block_addr:
The address at which to start the allocation
@@ -389,7 +343,7 @@ def next_delay_on_chip_address(self, block_addr):
# If we have routing info but no synapses, add an invalid entry
if self.__max_row_info.delayed_max_n_synapses == 0:
- self.__delay_index = self.__poptable.add_invalid_entry(
+ self.__delay_index = self.__poptable.add_invalid_machine_entry(
self.__delay_routing_info.first_key_and_mask)
return block_addr, SYN_REGION_UNUSED
@@ -403,36 +357,31 @@ def next_delay_on_chip_address(self, block_addr):
block_addr = self.__next_addr(block_addr, self.__delay_matrix_size)
return block_addr, self.__delay_syn_mat_offset
- def get_generator_data(
- self, syn_mat_offset, d_mat_offset, max_delay_per_stage):
+ def get_generator_data(self, syn_mat_offset, d_mat_offset):
""" Get the generator data for this matrix
:param int syn_mat_offset:
The synaptic matrix offset to write the data to
:param int d_mat_offset:
The synaptic matrix offset to write the delayed data to
- :param int max_delay_per_stage: around of timer ticks each delay stage
- holds.
:rtype: GeneratorData
"""
- self.__write_on_chip_delay_data(max_delay_per_stage)
+ self.__write_on_chip_delay_data()
return GeneratorData(
syn_mat_offset, d_mat_offset,
self.__max_row_info.undelayed_max_words,
self.__max_row_info.delayed_max_words,
self.__max_row_info.undelayed_max_n_synapses,
self.__max_row_info.delayed_max_n_synapses,
- self.__app_edge.pre_vertex.vertex_slices,
- self.__app_edge.post_vertex.vertex_slices,
+ self.__app_edge.pre_vertex.splitter.get_out_going_slices()[0],
+ self.__app_edge.post_vertex.splitter.get_in_coming_slices()[0],
self.__machine_edge.pre_vertex.vertex_slice,
self.__machine_edge.post_vertex.vertex_slice,
self.__synapse_info, self.__app_edge.n_delay_stages + 1,
- max_delay_per_stage)
+ self.__app_edge.post_vertex.splitter.max_support_delay())
- def __write_on_chip_delay_data(self, max_delay_per_stage):
+ def __write_on_chip_delay_data(self):
""" Write data for delayed on-chip generation
-
- :param max_delay_per_stage: max delay supported by psot vertex
"""
# If delay edge exists, tell this about the data too, so it can
# generate its own data
@@ -441,19 +390,19 @@ def __write_on_chip_delay_data(self, max_delay_per_stage):
self.__app_edge.delay_edge.pre_vertex.add_generator_data(
self.__max_row_info.undelayed_max_n_synapses,
self.__max_row_info.delayed_max_n_synapses,
- self.__app_edge.pre_vertex.vertex_slices,
- self.__app_edge.post_vertex.vertex_slices,
+ self.__app_edge.pre_vertex.splitter.get_out_going_slices()[0],
+ self.__app_edge.post_vertex.splitter.get_in_coming_slices()[0],
self.__machine_edge.pre_vertex.vertex_slice,
self.__machine_edge.post_vertex.vertex_slice,
self.__synapse_info, self.__app_edge.n_delay_stages + 1,
- max_delay_per_stage)
+ self.__app_edge.post_vertex.splitter.max_support_delay())
elif self.__max_row_info.delayed_max_n_synapses != 0:
raise Exception(
"Found delayed items but no delay machine edge for {}".format(
self.__app_edge.label))
def __next_addr(self, block_addr, size, max_addr=None):
- """ Get the next block address and check it hasn't overflowed the\
+ """ Get the next block address and check it hasn't overflowed the
allocation
:param int block_addr: The address of the allocation
@@ -515,7 +464,7 @@ def read_connections(
block = self.__get_block(
transceiver, placement, synapses_address)
splitter = self.__app_edge.post_vertex.splitter
- connections.append(self.__synapse_io.convert_to_connections(
+ connections.append(convert_to_connections(
self.__synapse_info, pre_slice, post_slice,
self.__max_row_info.undelayed_max_words,
self.__n_synapse_types, self.__weight_scales, block,
@@ -525,7 +474,7 @@ def read_connections(
block = self.__get_delayed_block(
transceiver, placement, synapses_address)
splitter = self.__app_edge.post_vertex.splitter
- connections.append(self.__synapse_io.convert_to_connections(
+ connections.append(convert_to_connections(
self.__synapse_info, pre_slice, post_slice,
self.__max_row_info.delayed_max_words, self.__n_synapse_types,
self.__weight_scales, block,
diff --git a/spynnaker/pyNN/models/neuron/synaptic_matrix_app.py b/spynnaker/pyNN/models/neuron/synaptic_matrix_app.py
index c1c4a9c2e9..9e0ea4aa9c 100644
--- a/spynnaker/pyNN/models/neuron/synaptic_matrix_app.py
+++ b/spynnaker/pyNN/models/neuron/synaptic_matrix_app.py
@@ -12,16 +12,18 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import math
import numpy
from pacman.model.graphs.common.slice import Slice
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
from spinn_front_end_common.utilities.helpful_functions import (
locate_memory_region_for_placement)
-from spynnaker.pyNN.models.neuron.synaptic_matrix import SynapticMatrix
-from spynnaker.pyNN.models.neuron.generator_data import (
- GeneratorData, SYN_REGION_UNUSED)
+from .synaptic_matrix import SynapticMatrix
+from .generator_data import GeneratorData, SYN_REGION_UNUSED
+from .synapse_io import read_all_synapses, convert_to_connections
+
+# The most pre-atoms that the generator can handle at once
+MAX_GENERATED_ATOMS = 1024
class SynapticMatrixApp(object):
@@ -30,8 +32,6 @@ class SynapticMatrixApp(object):
"""
__slots__ = [
- # The reader and writer of the synapses
- "__synapse_io",
# The master population table
"__poptable",
# The synaptic info that these matrices are for
@@ -70,10 +70,6 @@ class SynapticMatrixApp(object):
"__matrix_size",
# The expected size in bytes of a delayed synaptic matrix
"__delay_matrix_size",
- # The number of atoms in the machine-level pre-vertices
- "__n_sub_atoms",
- # The number of machine edges expected for this application edge
- "__n_sub_edges",
# The offset of the undelayed synaptic matrix in the region
"__syn_mat_offset",
# The offset of the delayed synaptic matrix in the region
@@ -90,12 +86,11 @@ class SynapticMatrixApp(object):
]
def __init__(
- self, synapse_io, poptable, synapse_info, app_edge,
+ self, poptable, synapse_info, app_edge,
n_synapse_types, all_single_syn_sz, post_vertex_slice,
synaptic_matrix_region, direct_matrix_region):
"""
- :param SynapseIORowBased synapse_io: The reader and writer of synapses
:param MasterPopTableAsBinarySearch poptable:
The master population table
:param SynapseInformation synapse_info:
@@ -112,7 +107,6 @@ def __init__(
:param int direct_matrix_region:
The region where "direct" or "single" synapses are stored
"""
- self.__synapse_io = synapse_io
self.__poptable = poptable
self.__synapse_info = synapse_info
self.__app_edge = app_edge
@@ -126,12 +120,8 @@ def __init__(
self.__matrices = dict()
# Calculate the max row info for this edge
- n_delay_stages = 0
- if app_edge.delay_edge is not None:
- n_delay_stages = app_edge.delay_edge.pre_vertex.n_delay_stages
- self.__max_row_info = self.__synapse_io.get_max_row_info(
- synapse_info, self.__post_vertex_slice, n_delay_stages,
- self.__poptable, app_edge)
+ self.__max_row_info = self.__app_edge.post_vertex.get_max_row_info(
+ synapse_info, self.__post_vertex_slice, app_edge)
# These are set directly later
self.__all_syn_block_sz = None
@@ -149,11 +139,6 @@ def __init__(
self.__app_edge.pre_vertex.n_atoms *
self.__app_edge.n_delay_stages *
self.__max_row_info.delayed_max_bytes)
- vertex = self.__app_edge.pre_vertex
- self.__n_sub_atoms = int(min(
- vertex.get_max_atoms_per_core(), vertex.n_atoms))
- self.__n_sub_edges = int(
- math.ceil(vertex.n_atoms / self.__n_sub_atoms))
# These are computed during synaptic generation
self.__syn_mat_offset = None
@@ -186,65 +171,14 @@ def __get_matrix(self, machine_edge):
self.__routing_info.get_routing_info_for_edge(
delayed_machine_edge))
matrix = SynapticMatrix(
- self.__synapse_io, self.__poptable, self.__synapse_info,
+ self.__poptable, self.__synapse_info,
machine_edge, self.__app_edge, self.__n_synapse_types,
self.__max_row_info, r_info, delayed_r_info, self.__weight_scales,
- self.__all_syn_block_sz, self.__all_single_syn_sz)
+ self.__all_syn_block_sz, self.__all_single_syn_sz,
+ self.__is_direct_capable(machine_edge))
self.__matrices[machine_edge] = matrix
return matrix
- def add_matrix_size(self, addr):
- """ Add the bytes required by the synaptic matrices
-
- :param int addr: The initial address
- :return: The final address after adding synapses
- :rtype: int
- """
- if self.__max_row_info.undelayed_max_n_synapses > 0:
- size = self.__n_sub_atoms * self.__max_row_info.undelayed_max_bytes
- for _ in range(self.__n_sub_edges):
- addr = self.__poptable.get_next_allowed_address(addr)
- addr += size
- return addr
-
- def add_delayed_matrix_size(self, addr):
- """ Add the bytes required by the delayed synaptic matrices
-
- :param int addr: The initial address
- :return: The final address after adding synapses
- :rtype: int
- """
- if self.__max_row_info.delayed_max_n_synapses > 0:
- size = (self.__n_sub_atoms *
- self.__max_row_info.delayed_max_bytes *
- self.__app_edge.n_delay_stages)
- for _ in range(self.__n_sub_edges):
- addr = self.__poptable.get_next_allowed_address(addr)
- addr += size
- return addr
-
- @property
- def generator_info_size(self):
- """ The number of bytes required by the generator information
-
- :rtype: int
- """
- if not self.__synapse_info.may_generate_on_machine():
- return 0
-
- connector = self.__synapse_info.connector
- dynamics = self.__synapse_info.synapse_dynamics
- gen_size = sum((
- GeneratorData.BASE_SIZE,
- connector.gen_delay_params_size_in_bytes(
- self.__synapse_info.delays),
- connector.gen_weight_params_size_in_bytes(
- self.__synapse_info.weights),
- connector.gen_connector_params_size_in_bytes,
- dynamics.gen_matrix_params_size_in_bytes
- ))
- return gen_size * self.__n_sub_edges
-
def can_generate_on_machine(self, single_addr):
""" Determine if an app edge can be generated on the machine
@@ -266,12 +200,33 @@ def __is_app_edge_direct(self, single_addr):
"""
next_single_addr = single_addr
for m_edge in self.__m_edges:
- matrix = self.__get_matrix(m_edge)
- is_direct, next_single_addr = matrix.is_direct(next_single_addr)
- if not is_direct:
+ if not self.__is_direct_capable(m_edge):
+ return False
+ n_single_bytes = (
+ m_edge.pre_vertex.vertex_slice.n_atoms * BYTES_PER_WORD)
+ next_single_addr += n_single_bytes
+ if next_single_addr > self.__all_single_syn_sz:
return False
return True
+ def __is_direct_capable(self, machine_edge):
+ """ Determine if the given edge can be done with a "direct"\
+ synaptic matrix - this must have an exactly 1 entry per row
+
+ :param ~pacman.model.graphs.machine.MachineEdge machine_edge:
+ The edge to test
+ :return: A tuple of a boolean indicating if the matrix is direct and
+ the next offset of the single matrix
+ :rtype: (bool, int)
+ """
+ pre_vertex_slice = machine_edge.pre_vertex.vertex_slice
+ post_vertex_slice = machine_edge.post_vertex.vertex_slice
+ return (
+ self.__app_edge.n_delay_stages == 0 and
+ self.__synapse_info.may_use_direct_matrix() and
+ (pre_vertex_slice.lo_atom == post_vertex_slice.lo_atom) and
+ (pre_vertex_slice.hi_atom == post_vertex_slice.hi_atom))
+
def set_info(self, all_syn_block_sz, app_key_info, delay_app_key_info,
routing_info, weight_scales, m_edges):
""" Set extra information that isn't necessarily available when the
@@ -309,8 +264,7 @@ class is created.
self.__use_app_keys = (
is_app_key and is_delay_app_key and len(m_edges) > 1)
- def write_matrix(
- self, spec, block_addr, single_addr, single_synapses):
+ def write_matrix(self, spec, block_addr, single_addr, single_synapses):
""" Write a synaptic matrix from host
:param ~data_specification.DataSpecificationGenerator spec:
@@ -374,8 +328,10 @@ def __write_app_matrix(self, spec, block_addr, matrix_data):
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.undelayed_max_n_synapses == 0:
- self.__index = self.__poptable.add_invalid_entry(
- self.__app_key_info.key_and_mask)
+ self.__index = self.__poptable.add_invalid_application_entry(
+ self.__app_key_info.key_and_mask,
+ self.__app_key_info.core_mask, self.__app_key_info.core_shift,
+ self.__app_key_info.n_neurons)
return block_addr
# Write a matrix for the whole application vertex
@@ -423,8 +379,11 @@ def __write_delay_app_matrix(self, spec, block_addr, matrix_data):
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.delayed_max_n_synapses == 0:
- self.__delay_index = self.__poptable.add_invalid_entry(
- self.__delay_app_key_info.key_and_mask)
+ self.__delay_index = self.__poptable.add_invalid_application_entry(
+ self.__delay_app_key_info.key_and_mask,
+ self.__delay_app_key_info.core_mask,
+ self.__delay_app_key_info.core_shift,
+ self.__delay_app_key_info.n_neurons)
return block_addr
# Write a matrix for the whole application vertex
@@ -465,40 +424,161 @@ def write_on_chip_matrix_data(self, generator_data, block_addr):
:rtype: int
"""
- # Reserve the space in the matrix for an application-level key,
- # and tell the pop table
- (block_addr, syn_addr, del_addr, syn_max_addr,
- del_max_addr) = self.__reserve_app_blocks(block_addr)
+ if self.__use_app_keys:
+ # Reserve the space in the matrix for an application-level key,
+ # and tell the pop table
+ (block_addr, syn_addr, del_addr, syn_max_addr,
+ del_max_addr) = self.__reserve_app_blocks(block_addr)
+
+ pre_slices =\
+ self.__app_edge.pre_vertex.splitter.get_out_going_slices()[0]
+ if self.__max_row_info.delayed_max_n_synapses == 0:
+ # If we are not using delays (as we have to sync with delays)
+ # Generate for theoretical maximum pre-slices that the
+ # generator can handle; Note that the generator can't handle
+ # full pre-vertices without running out of memory in general,
+ # so we break it down, but as little as possible
+ max_atom = self.__app_edge.pre_vertex.n_atoms - 1
+ pre_slices = [
+ Slice(lo_atom,
+ min(lo_atom + MAX_GENERATED_ATOMS - 1, max_atom))
+ for lo_atom in range(0, max_atom + 1, MAX_GENERATED_ATOMS)]
+ for pre_slice in pre_slices:
+ syn_addr, syn_mat_offset = self.__next_app_on_chip_address(
+ syn_addr, syn_max_addr, pre_slice)
+ del_addr, d_mat_offset = self.__next_app_delay_on_chip_address(
+ del_addr, del_max_addr, pre_slice)
+ generator_data.append(self.__get_generator_data(
+ syn_mat_offset, d_mat_offset, pre_slices, pre_slice))
+ for pre_slice in pre_slices:
+ self.__write_on_chip_delay_data(pre_slices, pre_slice)
+ return block_addr
# Go through the edges of the application edge and write data for the
- # generator; this has to be done on a machine-edge basis to avoid
- # overloading the generator, even if an application matrix is generated
+ # generator
for m_edge in self.__m_edges:
matrix = self.__get_matrix(m_edge)
- max_delay_per_stage = (
- m_edge.post_vertex.app_vertex.splitter.max_support_delay())
-
- if self.__use_app_keys:
- syn_addr, syn_mat_offset = matrix.next_app_on_chip_address(
- syn_addr, syn_max_addr)
- del_addr, d_mat_offset = matrix.next_app_delay_on_chip_address(
- del_addr, del_max_addr)
- else:
- block_addr, syn_mat_offset = matrix.next_on_chip_address(
- block_addr)
- block_addr, d_mat_offset = matrix.next_delay_on_chip_address(
- block_addr)
+ block_addr, syn_mat_offset = matrix.next_on_chip_address(
+ block_addr)
+ block_addr, d_mat_offset = matrix.next_delay_on_chip_address(
+ block_addr)
# Create the generator data and note it exists for this post vertex
- # Note generator data is written per machine-edge even when a whole
- # application vertex matrix exists, because these are just appended
- # to each other in the latter case; this makes it easier to
- # generate since it is still doing it in chunks, so less local
- # memory is needed.
generator_data.append(matrix.get_generator_data(
- syn_mat_offset, d_mat_offset, max_delay_per_stage))
+ syn_mat_offset, d_mat_offset))
return block_addr
+ def __next_app_on_chip_address(
+ self, app_block_addr, max_app_addr, pre_vertex_slice):
+ """ Allocate a machine-level address of a matrix from within an
+ app-level allocation
+
+ :param int app_block_addr:
+ The current position in the application block
+ :param int max_app_addr:
+ The position of the end of the allocation
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice to be allocated
+ :return: The address after the allocation and the allocated address
+ :rtype: int, int
+ """
+ if self.__max_row_info.undelayed_max_n_synapses == 0:
+ return app_block_addr, SYN_REGION_UNUSED
+
+ # Get the matrix size
+ matrix_size = (
+ pre_vertex_slice.n_atoms *
+ self.__max_row_info.undelayed_max_bytes)
+
+ # Note: No master population table padding is needed here because
+ # the allocation is at the application level
+ addr = app_block_addr
+ app_block_addr = self.__next_addr(
+ app_block_addr, matrix_size, max_app_addr)
+ return app_block_addr, addr
+
+ def __next_app_delay_on_chip_address(
+ self, app_block_addr, max_app_addr, pre_vertex_slice):
+ """ Allocate a machine-level address of a delayed matrix from within an
+ app-level allocation
+
+ :param int app_block_addr:
+ The current position in the application block
+ :param int max_app_addr:
+ The position of the end of the allocation
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice to be allocated
+ :return: The address after the allocation and the allocated address
+ :rtype: int, int
+ """
+ if self.__max_row_info.delayed_max_n_synapses == 0:
+ return app_block_addr, SYN_REGION_UNUSED
+
+ # Get the matrix size
+ delay_matrix_size = (
+ pre_vertex_slice.n_atoms *
+ self.__app_edge.n_delay_stages *
+ self.__max_row_info.delayed_max_bytes)
+
+ # Note: No master population table padding is needed here because
+ # the allocation is at the application level
+ addr = app_block_addr
+ app_block_addr = self.__next_addr(
+ app_block_addr, delay_matrix_size, max_app_addr)
+ return app_block_addr, addr
+
+ def __get_generator_data(
+ self, syn_mat_offset, d_mat_offset, pre_vertex_slices,
+ pre_vertex_slice):
+ """ Get the generator data for this matrix
+
+ :param int syn_mat_offset:
+ The synaptic matrix offset to write the data to
+ :param int d_mat_offset:
+ The synaptic matrix offset to write the delayed data to
+ :param list(pacman.model.graphs.common.Slice) pre_vertex_slices:
+ The pre-vertex-slices to get the data for
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice to be allocated
+ :rtype: GeneratorData
+ """
+ post_slices =\
+ self.__app_edge.post_vertex.splitter.get_in_coming_slices()[0]
+ return GeneratorData(
+ syn_mat_offset, d_mat_offset,
+ self.__max_row_info.undelayed_max_words,
+ self.__max_row_info.delayed_max_words,
+ self.__max_row_info.undelayed_max_n_synapses,
+ self.__max_row_info.delayed_max_n_synapses, pre_vertex_slices,
+ post_slices, pre_vertex_slice, self.__post_vertex_slice,
+ self.__synapse_info, self.__app_edge.n_delay_stages + 1,
+ self.__app_edge.post_vertex.splitter.max_support_delay())
+
+ def __write_on_chip_delay_data(self, pre_vertex_slices, pre_vertex_slice):
+ """ Write data for delayed on-chip generation
+
+ :param list(pacman.model.graphs.common.Slice) pre_vertex_slices:
+ The pre-vertex-slices to get the data for
+ :param ~pacman.model.graphs.common.Slice pre_vertex_slice:
+ The slice to be allocated
+ """
+ # If delay edge exists, tell this about the data too, so it can
+ # generate its own data
+ post_slices =\
+ self.__app_edge.post_vertex.splitter.get_in_coming_slices()[0]
+ if (self.__max_row_info.delayed_max_n_synapses > 0 and
+ self.__app_edge.delay_edge is not None):
+ self.__app_edge.delay_edge.pre_vertex.add_generator_data(
+ self.__max_row_info.undelayed_max_n_synapses,
+ self.__max_row_info.delayed_max_n_synapses, pre_vertex_slices,
+ post_slices, pre_vertex_slice, self.__post_vertex_slice,
+ self.__synapse_info, self.__app_edge.n_delay_stages + 1,
+ self.__app_edge.post_vertex.splitter.max_support_delay())
+ elif self.__max_row_info.delayed_max_n_synapses != 0:
+ raise Exception(
+ "Found delayed items but no delay machine edge for {}".format(
+ self.__app_edge.label))
+
def __reserve_app_blocks(self, block_addr):
""" Reserve blocks for a whole-application-vertex matrix if possible,
and tell the master population table
@@ -524,7 +604,7 @@ def __reserve_app_blocks(self, block_addr):
delay_max_addr)
def __reserve_mpop_block(self, block_addr):
- """ Reserve a block in the master population table for an undelayed\
+ """ Reserve a block in the master population table for an undelayed
matrix
:param int block_addr:
@@ -539,8 +619,10 @@ def __reserve_mpop_block(self, block_addr):
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.undelayed_max_n_synapses == 0:
- self.__index = self.__poptable.add_invalid_entry(
- self.__app_key_info.key_and_mask)
+ self.__index = self.__poptable.add_invalid_application_entry(
+ self.__app_key_info.key_and_mask,
+ self.__app_key_info.core_mask, self.__app_key_info.core_shift,
+ self.__app_key_info.n_neurons)
return block_addr, SYN_REGION_UNUSED, None
block_addr = self.__poptable.get_next_allowed_address(block_addr)
@@ -567,8 +649,11 @@ def __reserve_delay_mpop_block(self, block_addr):
# If we have routing info but no synapses, write an invalid entry
if self.__max_row_info.delayed_max_n_synapses == 0:
- self.__delay_index = self.__poptable.add_invalid_entry(
- self.__delay_app_key_info.key_and_mask)
+ self.__delay_index = self.__poptable.add_invalid_application_entry(
+ self.__delay_app_key_info.key_and_mask,
+ self.__delay_app_key_info.core_mask,
+ self.__delay_app_key_info.core_shift,
+ self.__delay_app_key_info.n_neurons)
return block_addr, SYN_REGION_UNUSED, None
block_addr = self.__poptable.get_next_allowed_address(block_addr)
@@ -590,44 +675,37 @@ def __update_connection_holders(self, data, delayed_data, machine_edge):
:param ~pacman.model.graphs.machine.MachineEdge machine_edge:
The machine edge the connections are for
"""
+ pre_vertex_slice = machine_edge.pre_vertex.vertex_slice
+ post_vertex_slice = machine_edge.post_vertex.vertex_slice
+ post_splitter = machine_edge.post_vertex.app_vertex.splitter
+ post_vertex_max_delay_ticks = post_splitter.max_support_delay()
for conn_holder in self.__synapse_info.pre_run_connection_holders:
conn_holder.add_connections(
- self.__synapse_io.read_all_synapses(
+ read_all_synapses(
data, delayed_data, self.__synapse_info,
self.__n_synapse_types, self.__weight_scales,
- machine_edge, self.__max_row_info))
+ pre_vertex_slice, post_vertex_slice,
+ post_vertex_max_delay_ticks, self.__max_row_info))
- def __next_addr(self, block_addr, size):
+ def __next_addr(self, block_addr, size, max_addr=None):
""" Get the next address after a block, checking it is in range
:param int block_addr: The address of the start of the block
:param int size: The size of the block in bytes
+ :param int max_addr: The maximum allowed address
:return: The updated address
:rtype: int
:raises Exception: If the updated address is out of range
"""
+ if not max_addr:
+ max_addr = self.__all_syn_block_sz
next_addr = block_addr + size
- if next_addr > self.__all_syn_block_sz:
+ if next_addr > max_addr:
raise Exception(
"Too much synaptic memory has been written: {} of {} "
- .format(next_addr, self.__all_syn_block_sz))
+ .format(next_addr, max_addr))
return next_addr
- def __update_synapse_index(self, index):
- """ Update the index of a synapse, checking it matches against indices\
- for other synapse_info for the same edge
-
- :param index: The index to set
- :raises Exception: If the index doesn't match the currently set index
- """
- if self.__index is None:
- self.__index = index
- elif self.__index != index:
- # This should never happen as things should be aligned over all
- # machine vertices, but check just in case!
- raise Exception(
- "Index of " + self.__synapse_info + " has changed!")
-
def get_connections(self, transceiver, placement):
""" Get the connections for this matrix from the machine
@@ -699,7 +777,7 @@ def __read_connections(self, transceiver, placement, synapses_address):
if self.__syn_mat_offset is not None:
block = self.__get_block(transceiver, placement, synapses_address)
splitter = self.__app_edge.post_vertex.splitter
- connections.append(self.__synapse_io.convert_to_connections(
+ connections.append(convert_to_connections(
self.__synapse_info, pre_slice, self.__post_vertex_slice,
self.__max_row_info.undelayed_max_words,
self.__n_synapse_types, self.__weight_scales, block,
@@ -709,7 +787,7 @@ def __read_connections(self, transceiver, placement, synapses_address):
block = self.__get_delayed_block(
transceiver, placement, synapses_address)
splitter = self.__app_edge.post_vertex.splitter
- connections.append(self.__synapse_io.convert_to_connections(
+ connections.append(convert_to_connections(
self.__synapse_info, pre_slice, self.__post_vertex_slice,
self.__max_row_info.delayed_max_words, self.__n_synapse_types,
self.__weight_scales, block, True,
diff --git a/spynnaker/pyNN/models/projection.py b/spynnaker/pyNN/models/projection.py
index b0886b7d35..c69644e6db 100644
--- a/spynnaker/pyNN/models/projection.py
+++ b/spynnaker/pyNN/models/projection.py
@@ -37,6 +37,8 @@
SynapseDynamicsStatic)
from spynnaker._version import __version__
from spynnaker.pyNN.models.populations import Population, PopulationView
+from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
+from spynnaker.pyNN.models.spike_source import SpikeSourcePoissonVertex
logger = FormatAdapter(logging.getLogger(__name__))
@@ -182,9 +184,6 @@ def __init__(
# add projection to the SpiNNaker control system
sim.add_projection(self)
- # reset the ring buffer shifts
- post_vertex.reset_ring_buffer_shifts()
-
# If there is a virtual board, we need to hold the data in case the
# user asks for it
self.__virtual_connection_list = None
@@ -197,6 +196,15 @@ def __init__(
self.__synapse_information.add_pre_run_connection_holder(
connection_holder)
+ # If the target is a population, add to the list of incoming
+ # projections
+ if isinstance(post_vertex, AbstractPopulationVertex):
+ post_vertex.add_incoming_projection(self)
+
+ # If the source is a poisson, add to the list of outgoing projections
+ if isinstance(pre_vertex, SpikeSourcePoissonVertex):
+ pre_vertex.add_outgoing_projection(self)
+
@staticmethod
def __check_population(param, connector):
"""
diff --git a/spynnaker/pyNN/models/recorder.py b/spynnaker/pyNN/models/recorder.py
index 0d4a8531a1..508322884d 100644
--- a/spynnaker/pyNN/models/recorder.py
+++ b/spynnaker/pyNN/models/recorder.py
@@ -473,11 +473,9 @@ def __append_current_segment(self, block, variables, view_indexes, clear):
elif variable == REWIRING:
self.__read_in_event(
segment=segment,
- block=block,
event_array=self.get_events(variable),
variable=variable,
- recording_start_time=self._recording_start_time,
- label=self.__population.label)
+ recording_start_time=self._recording_start_time)
else:
(data, data_indexes, sampling_interval) = \
self.get_recorded_matrix(variable)
@@ -537,11 +535,9 @@ def _append_previous_segment(
elif variable == REWIRING:
self.__read_in_event(
segment=segment,
- block=block,
event_array=variable_cache.data,
variable=variable,
- recording_start_time=data_cache.recording_start_time,
- label=self.__population.label)
+ recording_start_time=data_cache.recording_start_time)
else:
self.__read_in_signal(
segment=segment,
@@ -716,18 +712,15 @@ def __read_in_signal(
channel_index.analogsignals.append(data_array)
def __read_in_event(
- self, segment, block, event_array, variable, recording_start_time,
- label):
+ self, segment, event_array, variable, recording_start_time):
""" Reads in a data item that is an event (i.e. rewiring form/elim)\
and saves this data to the segment.
:param ~neo.core.Segment segment: Segment to add data to
- :param ~neo.core.Block block: neo block
:param ~numpy.ndarray signal_array: the raw "event" data
:param str variable: the variable name
:param recording_start_time: when recording started
:type recording_start_time: float or int
- :param str label: human readable label
"""
# pylint: disable=too-many-arguments, no-member
t_start = recording_start_time * quantities.ms
diff --git a/spynnaker/pyNN/models/spike_source/__init__.py b/spynnaker/pyNN/models/spike_source/__init__.py
index 44882b9c09..9a2eb7f7b8 100644
--- a/spynnaker/pyNN/models/spike_source/__init__.py
+++ b/spynnaker/pyNN/models/spike_source/__init__.py
@@ -20,7 +20,9 @@
from .spike_source_poisson_variable import SpikeSourcePoissonVariable
from .spike_source_poisson_machine_vertex import (
SpikeSourcePoissonMachineVertex)
+from .spike_source_poisson_vertex import SpikeSourcePoissonVertex
__all__ = ["SpikeSourceArray", "SpikeSourceArrayVertex",
"SpikeSourceFromFile", "SpikeSourcePoisson",
- "SpikeSourcePoissonMachineVertex", "SpikeSourcePoissonVariable"]
+ "SpikeSourcePoissonMachineVertex", "SpikeSourcePoissonVariable",
+ "SpikeSourcePoissonVertex"]
diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
index cb406076d3..1ca45bed86 100644
--- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
+++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
@@ -23,7 +23,7 @@
from spinn_front_end_common.utilities import helpful_functions
from spinn_front_end_common.utilities.constants import (
MICRO_TO_SECOND_CONVERSION, SIMULATION_N_BYTES, BYTES_PER_WORD,
- MICRO_TO_MILLISECOND_CONVERSION)
+ MICRO_TO_MILLISECOND_CONVERSION, BYTES_PER_SHORT)
from spinn_utilities.overrides import overrides
from pacman.executor.injection_decorator import inject_items
from pacman.model.graphs.machine import MachineVertex
@@ -50,6 +50,9 @@
from spynnaker.pyNN.utilities.constants import (
LIVE_POISSON_CONTROL_PARTITION_ID)
from spynnaker.pyNN.utilities.struct import Struct
+from spynnaker.pyNN.models.abstract_models import (
+ SendsSynapticInputsOverSDRAM, ReceivesSynapticInputsOverSDRAM)
+from spynnaker.pyNN.exceptions import SynapticConfigurationException
def _flatten(alist):
@@ -72,6 +75,15 @@ def get_rates_bytes(vertex_slice, rate_data):
(n_rates * PARAMS_WORDS_PER_RATE)) * BYTES_PER_WORD
+def get_sdram_edge_params_bytes(vertex_slice):
+ """ Gets the size of the Poisson SDRAM region in bytes
+ :param ~pacman.model.graphs.common.Slice vertex_slice:
+ :rtype: int
+ """
+ return SDRAM_EDGE_PARAMS_BASE_BYTES + (
+ vertex_slice.n_atoms * SDRAM_EDGE_PARAMS_BYTES_PER_WEIGHT)
+
+
# uint32_t n_rates; uint32_t index
PARAMS_WORDS_PER_NEURON = 2
@@ -79,6 +91,14 @@ def get_rates_bytes(vertex_slice, rate_data):
# sqrt_lambda, isi_val, time_to_spike
PARAMS_WORDS_PER_RATE = 8
+# The size of each weight to be stored for SDRAM transfers
+SDRAM_EDGE_PARAMS_BYTES_PER_WEIGHT = BYTES_PER_SHORT
+
+# SDRAM edge param base size:
+# 1. address, 2. size of transfer,
+# 3. offset to start writing, 4. VLA of weights (not counted here)
+SDRAM_EDGE_PARAMS_BASE_BYTES = 3 * BYTES_PER_WORD
+
_ONE_WORD = struct.Struct(".
-import math
from enum import Enum
from pacman.executor.injection_decorator import inject_items
from spinn_front_end_common.interface.simulation import simulation_utilities
from spinn_front_end_common.utilities.constants import (
- BITS_PER_WORD, BYTES_PER_WORD, SIMULATION_N_BYTES)
+ BYTES_PER_WORD, SIMULATION_N_BYTES)
from spinn_utilities.overrides import overrides
from pacman.model.graphs.machine import MachineVertex
from spinn_front_end_common.interface.provenance import (
@@ -32,8 +31,6 @@
# 6. n_delay_stages, 7. the number of delay supported by each delay stage
from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID
-_DELAY_PARAM_HEADER_WORDS = 8
-
_EXPANDER_BASE_PARAMS_SIZE = 3 * BYTES_PER_WORD
DELAY_EXPANDER_APLX = "delay_expander.aplx"
@@ -45,6 +42,7 @@ class DelayExtensionMachineVertex(
__slots__ = [
"__resources",
+ "__slice_index",
"__drop_late_spikes"]
class _DELAY_EXTENSION_REGIONS(Enum):
@@ -86,7 +84,7 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum):
BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued"
def __init__(self, resources_required, label, constraints=None,
- app_vertex=None, vertex_slice=None):
+ app_vertex=None, vertex_slice=None, slice_index=None):
"""
:param ~pacman.model.resources.ResourceContainer resources_required:
The resources required by the vertex
@@ -104,6 +102,7 @@ def __init__(self, resources_required, label, constraints=None,
super().__init__(
label, constraints=constraints, app_vertex=app_vertex,
vertex_slice=vertex_slice)
+ self.__slice_index = slice_index
self.__resources = resources_required
@property
@@ -240,11 +239,8 @@ def generate_data_specification(
# ###################################################################
# Reserve SDRAM space for memory areas:
- n_words_per_stage = int(
- math.ceil(self._vertex_slice.n_atoms / BITS_PER_WORD))
- delay_params_sz = BYTES_PER_WORD * (
- _DELAY_PARAM_HEADER_WORDS +
- (self._app_vertex.n_delay_stages * n_words_per_stage))
+ delay_params_sz = self._app_vertex.delay_params_size(
+ self._vertex_slice)
spec.reserve_memory_region(
region=self._DELAY_EXTENSION_REGIONS.SYSTEM.value,
@@ -305,7 +301,7 @@ def generate_data_specification(
self._DELAY_EXTENSION_REGIONS.TDMA_REGION.value)
spec.write_array(
self._app_vertex.generate_tdma_data_specification_data(
- self._app_vertex.vertex_slices.index(self._vertex_slice)))
+ self.__slice_index))
# End-of-Spec:
spec.end_specification()
diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py
index ccdfad49bf..fd876d865a 100644
--- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py
+++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py
@@ -19,6 +19,8 @@
from pacman.model.constraints.key_allocator_constraints import (
ContiguousKeyRangeContraint)
from spinn_utilities.config_holder import get_config_bool
+from spinn_front_end_common.utilities.constants import (
+ BITS_PER_WORD, BYTES_PER_WORD)
from spinn_front_end_common.abstract_models import (
AbstractProvidesOutgoingPartitionConstraints)
from spinn_front_end_common.abstract_models.impl import (
@@ -30,6 +32,8 @@
from .delay_block import DelayBlock
from .delay_generator_data import DelayGeneratorData
+_DELAY_PARAM_HEADER_WORDS = 8
+
class DelayExtensionVertex(
TDMAAwareApplicationVertex, AbstractHasDelayStages,
@@ -40,16 +44,14 @@ class DelayExtensionVertex(
__slots__ = [
"__delay_blocks",
"__delay_per_stage",
- "__max_delay_needed_to_support",
"__n_atoms",
"__n_delay_stages",
"__source_vertex",
"__delay_generator_data",
- "__n_data_specs",
"__drop_late_spikes"]
# this maps to what master assumes
- MAX_TICKS_POSSIBLE_TO_SUPPORT = 8 * 16
+ MAX_SLOTS = 8
SAFETY_FACTOR = 5000
MAX_DTCM_AVAILABLE = 59756 - SAFETY_FACTOR
@@ -58,12 +60,12 @@ class DelayExtensionVertex(
"yet feasible. Please report it to Spinnaker user mail list.")
def __init__(
- self, n_neurons, delay_per_stage, max_delay_to_support,
+ self, n_neurons, delay_per_stage, n_delay_stages,
source_vertex, constraints=None, label="DelayExtension"):
"""
:param int n_neurons: the number of neurons
:param int delay_per_stage: the delay per stage
- :param int max_delay_to_support: the max delay this will cover
+ :param int n_delay_stages: the (initial) number of delay stages needed
:param ~pacman.model.graphs.application.ApplicationVertex \
source_vertex:
where messages are coming from
@@ -77,13 +79,9 @@ def __init__(
label, constraints, POP_TABLE_MAX_ROW_LENGTH, splitter=None)
self.__source_vertex = source_vertex
- self.__n_delay_stages = 0
- self.__max_delay_needed_to_support = max_delay_to_support
+ self.__n_delay_stages = n_delay_stages
self.__delay_per_stage = delay_per_stage
self.__delay_generator_data = defaultdict(list)
- self.__n_data_specs = 0
- self.set_new_n_delay_stages_and_delay_per_stage(
- self.__delay_per_stage, self.__max_delay_needed_to_support)
# atom store
self.__n_atoms = self.round_n_atoms(n_neurons, "n_neurons")
@@ -104,10 +102,7 @@ def drop_late_spikes(self):
@staticmethod
def get_max_delay_ticks_supported(delay_ticks_at_post_vertex):
- max_slots = math.floor(
- DelayExtensionVertex.MAX_TICKS_POSSIBLE_TO_SUPPORT /
- delay_ticks_at_post_vertex)
- return max_slots * delay_ticks_at_post_vertex
+ return DelayExtensionVertex.MAX_SLOTS * delay_ticks_at_post_vertex
@property
@overrides(AbstractHasDelayStages.n_delay_stages)
@@ -120,18 +115,14 @@ def n_delay_stages(self):
return self.__n_delay_stages
def set_new_n_delay_stages_and_delay_per_stage(
- self, new_post_vertex_n_delay, new_max_delay):
- if new_post_vertex_n_delay != self.__delay_per_stage:
+ self, n_delay_stages, delay_per_stage):
+ if delay_per_stage != self.__delay_per_stage:
raise DelayExtensionException(
self.MISMATCHED_DELAY_PER_STAGE_ERROR_MESSAGE.format(
- self.__delay_per_stage, new_post_vertex_n_delay))
-
- new_n_stages = int(math.ceil(
- (new_max_delay - self.__delay_per_stage) /
- self.__delay_per_stage))
+ self.__delay_per_stage, delay_per_stage))
- if new_n_stages > self.__n_delay_stages:
- self.__n_delay_stages = new_n_stages
+ if n_delay_stages > self.__n_delay_stages:
+ self.__n_delay_stages = n_delay_stages
@property
def delay_per_stage(self):
@@ -192,7 +183,6 @@ def add_generator_data(
~spynnaker.pyNN.models.neural_projections.SynapseInformation
:param int max_stage:
The maximum delay stage
- :param int machine_time_step: sim machine time step
"""
self.__delay_generator_data[pre_vertex_slice].append(
DelayGeneratorData(
@@ -216,3 +206,18 @@ def gen_on_machine(self, vertex_slice):
def delay_generator_data(self, vertex_slice):
return self.__delay_generator_data.get(vertex_slice, None)
+
+ def delay_params_size(self, vertex_slice):
+ """ The size of the delay parameters for a given vertex slice
+
+ :param Slice slice: The slice to get the size of the parameters for
+ """
+ n_words_per_stage = int(
+ math.ceil(vertex_slice.n_atoms / BITS_PER_WORD))
+ return BYTES_PER_WORD * (
+ _DELAY_PARAM_HEADER_WORDS +
+ (self.__n_delay_stages * n_words_per_stage))
+
+ @overrides(TDMAAwareApplicationVertex.get_n_cores)
+ def get_n_cores(self):
+ return len(self._splitter.get_out_going_slices()[0])
diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg
index 24fbde890d..f1d1c71dff 100644
--- a/spynnaker/pyNN/spynnaker.cfg
+++ b/spynnaker/pyNN/spynnaker.cfg
@@ -33,10 +33,14 @@ one_to_one_connection_dtcm_max_bytes = 2048
# performance limiter to throw away packets not processed in a given time step
drop_late_spikes = True
+# The overhead to add to the transfer clocks
+# when using a split synapse neuron model
+transfer_overhead_clocks = 200
+
[Mapping]
# Algorithms below - format is ,<>
-application_to_machine_graph_algorithms = DelaySupportAdder,SpynnakerSplitterSelector,SpYNNakerSplitterPartitioner
+application_to_machine_graph_algorithms = SplitterReset,DelaySupportAdder,SpynnakerSplitterSelector,SpYNNakerSplitterPartitioner
machine_graph_to_machine_algorithms = EdgeToNKeysMapper,SpreaderPlacer,NerRouteTrafficAware,BasicTagAllocator,ProcessPartitionConstraints,ZonedRoutingInfoAllocator,BasicRoutingTableGenerator,RouterCollisionPotentialReport
machine_graph_to_virtual_machine_algorithms = EdgeToNKeysMapper,SpreaderPlacer,NerRouteTrafficAware,BasicTagAllocator,ProcessPartitionConstraints,ZonedRoutingInfoAllocator,BasicRoutingTableGenerator,PairCompressor
loading_algorithms = PairOnChipRouterCompression
diff --git a/spynnaker/pyNN/utilities/bit_field_utilities.py b/spynnaker/pyNN/utilities/bit_field_utilities.py
index f62d4f40d1..bf89dc99a7 100644
--- a/spynnaker/pyNN/utilities/bit_field_utilities.py
+++ b/spynnaker/pyNN/utilities/bit_field_utilities.py
@@ -16,8 +16,6 @@
import math
from pacman.utilities.constants import FULL_MASK
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
-from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge
-from spynnaker.pyNN.models.abstract_models import AbstractHasDelayStages
#: number of elements
ELEMENTS_USED_IN_EACH_BIT_FIELD = 3 # n words, key, pointer to bitfield
@@ -41,86 +39,62 @@
BIT_IN_A_WORD = 32.0
-def get_estimated_sdram_for_bit_field_region(app_graph, vertex):
+def get_estimated_sdram_for_bit_field_region(incoming_projections):
""" estimates the SDRAM for the bit field region
- :param ~pacman.model.graphs.application.ApplicationGraph app_graph:
- the app graph
- :param ~pacman.model.graphs.application.ApplicationVertex vertex:
- app vertex
+ :param iterable(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections that target the vertex in question
:return: the estimated number of bytes used by the bit field region
:rtype: int
"""
sdram = ELEMENTS_USED_IN_BIT_FIELD_HEADER * BYTES_PER_WORD
- for incoming_edge in app_graph.get_edges_ending_at_vertex(vertex):
- if isinstance(incoming_edge, ProjectionApplicationEdge):
- slices, _ = (
- incoming_edge.pre_vertex.splitter.get_out_going_slices())
+ seen_app_edges = set()
+ for proj in incoming_projections:
+ app_edge = proj._projection_edge
+ if app_edge not in seen_app_edges:
+ seen_app_edges.add(app_edge)
+ slices, _ = app_edge.pre_vertex.splitter.get_out_going_slices()
n_machine_vertices = len(slices)
- slice_atoms = list()
- for vertex_slice in slices:
- slice_atoms.append(vertex_slice.n_atoms)
- n_atoms_per_machine_vertex = max(slice_atoms)
-
- if isinstance(incoming_edge.pre_vertex, AbstractHasDelayStages):
- n_atoms_per_machine_vertex *= \
- incoming_edge.pre_vertex.n_delay_stages
- n_words_for_atoms = int(math.ceil(
- n_atoms_per_machine_vertex / BIT_IN_A_WORD))
+ atoms_per_core = max(
+ vertex_slice.n_atoms for vertex_slice in slices)
+ n_words_for_atoms = int(math.ceil(atoms_per_core / BIT_IN_A_WORD))
+ sdram += (
+ ((ELEMENTS_USED_IN_EACH_BIT_FIELD + n_words_for_atoms) *
+ n_machine_vertices) * BYTES_PER_WORD)
+ # Also add for delay vertices if needed
+ n_words_for_delays = int(math.ceil(
+ atoms_per_core * app_edge.n_delay_stages / BIT_IN_A_WORD))
sdram += (
- (ELEMENTS_USED_IN_EACH_BIT_FIELD + (
- n_words_for_atoms * n_machine_vertices)) *
- BYTES_PER_WORD)
+ ((ELEMENTS_USED_IN_EACH_BIT_FIELD + n_words_for_delays) *
+ n_machine_vertices) * BYTES_PER_WORD)
return sdram
-def get_estimated_sdram_for_key_region(app_graph, vertex):
+def get_estimated_sdram_for_key_region(incoming_projections):
""" gets an estimate of the bitfield builder region
- :param ~pacman.model.graphs.application.ApplicationGraph app_graph:
- the app graph
- :param ~pacman.model.graphs.application.ApplicationVertex vertex:
- app vertex
+ :param iterable(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections that target the vertex in question
:return: SDRAM needed
:rtype: int
"""
# basic sdram
sdram = N_KEYS_DATA_SET_IN_WORDS * BYTES_PER_WORD
- for in_edge in app_graph.get_edges_ending_at_vertex(vertex):
-
- # Get the number of likely vertices
- slices, _ = in_edge.pre_vertex.splitter.get_out_going_slices()
- sdram += (
- len(slices) * N_ELEMENTS_IN_EACH_KEY_N_ATOM_MAP * BYTES_PER_WORD)
- return sdram
-
-
-def _exact_sdram_for_bit_field_region(
- machine_graph, vertex, n_key_map):
- """ calculates the correct SDRAM for the bitfield region based off \
- the machine graph
-
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- machine graph
- :param ~pacman.model.graphs.machine.MachineVertex vertex:
- the machine vertex
- :param ~pacman.model.routing_info.AbstractMachinePartitionNKeysMap \
- n_key_map:
- n keys map
- :return: SDRAM in bytes
- :rtype: int
- """
- sdram = ELEMENTS_USED_IN_BIT_FIELD_HEADER * BYTES_PER_WORD
- for incoming_edge in machine_graph.get_edges_ending_at_vertex(vertex):
- n_keys = n_key_map.n_keys_for_partition(
- machine_graph.get_outgoing_partition_for_edge(incoming_edge))
- n_words_for_atoms = int(math.ceil(n_keys / BIT_IN_A_WORD))
-
- sdram += (
- (ELEMENTS_USED_IN_EACH_BIT_FIELD + n_words_for_atoms) *
- BYTES_PER_WORD)
+ seen_app_edges = set()
+ for proj in incoming_projections:
+ in_edge = proj._projection_edge
+ if in_edge not in seen_app_edges:
+ seen_app_edges.add(in_edge)
+
+ # Get the number of likely vertices
+ slices, _ = in_edge.pre_vertex.splitter.get_out_going_slices()
+ sdram += (len(slices) * N_ELEMENTS_IN_EACH_KEY_N_ATOM_MAP *
+ BYTES_PER_WORD)
+ if in_edge.n_delay_stages:
+ sdram += (len(slices) * N_ELEMENTS_IN_EACH_KEY_N_ATOM_MAP *
+ BYTES_PER_WORD)
return sdram
@@ -133,61 +107,55 @@ def exact_sdram_for_bit_field_builder_region():
return N_REGIONS_ADDRESSES * BYTES_PER_WORD
-def _exact_sdram_for_bit_field_key_region(machine_graph, vertex):
- """ Calculates the exact SDRAM for the bitfield key region
-
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- machine graph
- :param ~pacman.model.graphs.machine.MachineVertex vertex: machine vertex
- :return: bytes
- :rtype: int
- """
- return (
- N_KEYS_DATA_SET_IN_WORDS +
- len(machine_graph.get_edges_ending_at_vertex(vertex)) *
- N_ELEMENTS_IN_EACH_KEY_N_ATOM_MAP) * BYTES_PER_WORD
-
-
def reserve_bit_field_regions(
- spec, machine_graph, n_key_map, vertex, bit_field_builder_region,
- bit_filter_region, bit_field_key_region):
+ spec, incoming_projections, bit_field_builder_region,
+ bit_filter_region, bit_field_key_region,
+ bit_field_builder_region_ref=None, bit_filter_region_ref=None,
+ bit_field_key_region_ref=None):
""" reserves the regions for the bitfields
:param ~data_specification.DataSpecificationGenerator spec:
dsg spec writer
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- machine graph
- :param ~pacman.model.routing_info.AbstractMachinePartitionNKeysMap \
- n_key_map:
- map between partitions and n keys
- :param ~pacman.model.graphs.machine.MachineVertex vertex: machine vertex
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to generate bitfields for
:param int bit_field_builder_region: region id for the builder region
:param int bit_filter_region: region id for the bitfield region
:param int bit_field_key_region: region id for the key map
+ :param bit_field_builder_region_ref:
+ Reference to give the region, or None if not referencable
+ :type bit_field_builder_region_ref: int or None
+ :param bit_filter_region_ref:
+ Reference to give the region, or None if not referencable
+ :type bit_filter_region_ref: int or None
+ :param bit_field_key_region_ref:
+ Reference to give the region, or None if not referencable
+ :type bit_field_key_region_ref: int or None
"""
# reserve the final destination for the bitfields
spec.reserve_memory_region(
region=bit_filter_region,
- size=_exact_sdram_for_bit_field_region(
- machine_graph, vertex, n_key_map),
- label="bit_field region")
+ size=get_estimated_sdram_for_bit_field_region(incoming_projections),
+ label="bit_field region",
+ reference=bit_filter_region_ref)
# reserve region for the bitfield builder
spec.reserve_memory_region(
region=bit_field_builder_region,
size=exact_sdram_for_bit_field_builder_region(),
- label="bit field builder region")
+ label="bit field builder region",
+ reference=bit_field_builder_region_ref)
# reserve memory region for the key region
spec.reserve_memory_region(
region=bit_field_key_region,
- size=_exact_sdram_for_bit_field_key_region(machine_graph, vertex),
- label="bit field key data")
+ size=get_estimated_sdram_for_key_region(incoming_projections),
+ label="bit field key data",
+ reference=bit_field_key_region_ref)
def write_bitfield_init_data(
- spec, machine_vertex, machine_graph, routing_info, n_key_map,
+ spec, incoming_projections, vertex_slice, routing_info,
bit_field_builder_region, master_pop_region_id,
synaptic_matrix_region_id, direct_matrix_region_id,
bit_field_region_id, bit_field_key_map_region_id,
@@ -196,14 +164,11 @@ def write_bitfield_init_data(
:param ~data_specification.DataSpecificationGenerator spec:
data spec writer
- :param ~pacman.model.graphs.machine.MachineVertex machine_vertex:
- machine vertex
- :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
- machine graph
+ :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
+ The projections to generate bitfields for
+ :param ~pacman.model.graphs.common.slice vertex_slice:
+ The slice of the target vertex
:param ~pacman.model.routing_info.RoutingInfo routing_info: keys
- :param ~pacman.model.routing_info.AbstractMachinePartitionNKeysMap \
- n_key_map:
- map for edge to n keys
:param int bit_field_builder_region: the region id for the bitfield builder
:param int master_pop_region_id: the region id for the master pop table
:param int synaptic_matrix_region_id: the region id for the synaptic matrix
@@ -231,17 +196,24 @@ def write_bitfield_init_data(
spec.switch_write_focus(bit_field_key_map_region_id)
+ # Gather the machine edges that target this core
+ machine_edges = list()
+ seen_app_edges = set()
+ for proj in incoming_projections:
+ in_edge = proj._projection_edge
+ if in_edge not in seen_app_edges:
+ seen_app_edges.add(in_edge)
+ for machine_edge in in_edge.machine_edges:
+ if machine_edge.post_vertex.vertex_slice == vertex_slice:
+ machine_edges.append(machine_edge)
+
# write n keys max atom map
- spec.write_value(
- len(machine_graph.get_edges_ending_at_vertex(machine_vertex)))
+ spec.write_value(len(machine_edges))
# load in key to max atoms map
- for out_going_partition in machine_graph.\
- get_multicast_edge_partitions_ending_at_vertex(machine_vertex):
- spec.write_value(
- routing_info.get_first_key_from_partition(out_going_partition))
- spec.write_value(
- n_key_map.n_keys_for_partition(out_going_partition))
+ for machine_edge in machine_edges:
+ spec.write_value(routing_info.get_first_key_for_edge(machine_edge))
+ spec.write_value(machine_edge.pre_vertex.vertex_slice.n_atoms)
# ensure if nothing else that n bitfields in bitfield region set to 0
spec.switch_write_focus(bit_field_region_id)
diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py
index fe6215c51a..f84f631619 100644
--- a/spynnaker/pyNN/utilities/constants.py
+++ b/spynnaker/pyNN/utilities/constants.py
@@ -13,7 +13,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-from enum import Enum
from spinn_front_end_common.utilities.constants import (
BYTES_PER_WORD, BYTES_PER_KB)
@@ -58,26 +57,6 @@
#: the minimum supported delay slot between two neurons
MIN_SUPPORTED_DELAY = 1
-
-class POPULATION_BASED_REGIONS(Enum):
- """Regions for populations."""
- SYSTEM = 0
- NEURON_PARAMS = 1
- SYNAPSE_PARAMS = 2
- POPULATION_TABLE = 3
- SYNAPTIC_MATRIX = 4
- SYNAPSE_DYNAMICS = 5
- STRUCTURAL_DYNAMICS = 6
- NEURON_RECORDING = 7
- PROVENANCE_DATA = 8
- PROFILING = 9
- CONNECTOR_BUILDER = 10
- DIRECT_MATRIX = 11
- BIT_FIELD_FILTER = 12
- BIT_FIELD_BUILDER = 13
- BIT_FIELD_KEY_MAP = 14
-
-
#: The partition ID used for spike data
SPIKE_PARTITION_ID = "SPIKE"
@@ -93,3 +72,9 @@ class POPULATION_BASED_REGIONS(Enum):
#: The maximum row length of the master population table
POP_TABLE_MAX_ROW_LENGTH = 256
+
+#: The name of the partition for Synaptic SDRAM
+SYNAPSE_SDRAM_PARTITION_ID = "SDRAM Synaptic Inputs"
+
+#: The conservative amount of write bandwidth available on a chip
+WRITE_BANDWIDTH_BYTES_PER_SECOND = 250 * 1024 * 1024
diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py
index 485209fe85..e5bdf534df 100644
--- a/spynnaker/pyNN/utilities/utility_calls.py
+++ b/spynnaker/pyNN/utilities/utility_calls.py
@@ -30,6 +30,9 @@
RandomStatsNormalClippedImpl, RandomStatsNormalImpl,
RandomStatsPoissonImpl, RandomStatsRandIntImpl, RandomStatsUniformImpl,
RandomStatsVonmisesImpl, RandomStatsBinomialImpl)
+from spinn_front_end_common.utilities.constants import (
+ MICRO_TO_SECOND_CONVERSION)
+from spynnaker.pyNN.utilities.constants import WRITE_BANDWIDTH_BYTES_PER_SECOND
logger = FormatAdapter(logging.getLogger(__name__))
@@ -356,3 +359,14 @@ def moved_in_v6(old_location, new_location):
logger.warning("File {} moved to {}. Please fix your imports. "
"In version 7 this will fail completely."
"".format(old_location, new_location))
+
+
+def get_time_to_write_us(n_bytes, n_cores):
+ """ Determine how long a write of a given number of bytes will take in us
+
+ :param int n_bytes: The number of bytes to transfer
+ :param int n_cores: How many cores will be writing at the same time
+ """
+ bandwidth_per_core = WRITE_BANDWIDTH_BYTES_PER_SECOND / n_cores
+ seconds = n_bytes / bandwidth_per_core
+ return int(math.ceil(seconds * MICRO_TO_SECOND_CONVERSION))
diff --git a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_long_run/test_synfire_low_sdram_long_run.py b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_long_run/test_synfire_low_sdram_long_run.py
index f38fd92934..3ec78d01b2 100644
--- a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_long_run/test_synfire_low_sdram_long_run.py
+++ b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_long_run/test_synfire_low_sdram_long_run.py
@@ -24,7 +24,7 @@
n_neurons = 200 # number of neurons in each population
runtime = 3000
-neurons_per_core = int(n_neurons / 2)
+neurons_per_core = 9
synfire_run = SynfireRunner()
diff --git a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/spynnaker.cfg b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/spynnaker.cfg
index 0fa0891b20..0c04e72214 100644
--- a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/spynnaker.cfg
+++ b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/spynnaker.cfg
@@ -5,23 +5,7 @@ max_sdram_allowed_per_chip = 1519836
[Buffers]
use_auto_pause_and_resume = True
# Lower than defaults
-# This cause less partitioning and therefor more auto pause runs
+# This cause less partitioning and therefore more auto pause runs
minimum_auto_time_steps = 1000
-[Simulation]
-
-# performance controller to ensure only so many packets from a given
-# app vertex happen at any given time (aka how many machine vertices
-# from this app vertex can fire at same time)
-app_machine_quantity = 5
-
-time_between_cores = 1.2
-
-# performance controller for how much of the time step to use for sending
-fraction_of_time_spike_sending = 0.5
-
-# performance controller for how much of the time step to use for before the
-# TDMA
-fraction_of_time_before_sending = 0.01
-
diff --git a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/test_synfire_low_sdram_medium_run.py b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/test_synfire_low_sdram_medium_run.py
index 0ced2def4d..878aa9aabf 100644
--- a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/test_synfire_low_sdram_medium_run.py
+++ b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_fail_run/test_synfire_low_sdram_medium_run.py
@@ -26,7 +26,7 @@
n_neurons = 200 # number of neurons in each population
runtime = 3000
-neurons_per_core = n_neurons / 2
+neurons_per_core = 43
synfire_run = SynfireRunner()
diff --git a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_run/test_synfire_low_sdram_medium_run.py b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_run/test_synfire_low_sdram_medium_run.py
index db4f18aa57..6bac83ff42 100644
--- a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_run/test_synfire_low_sdram_medium_run.py
+++ b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/test_medium_run/test_synfire_low_sdram_medium_run.py
@@ -24,7 +24,7 @@
n_neurons = 200 # number of neurons in each population
runtime = 3000
-neurons_per_core = n_neurons / 2
+neurons_per_core = 55
synfire_run = SynfireRunner()
diff --git a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/too_little_memory/test_synfire_1_too_low_sdram.py b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/too_little_memory/test_synfire_1_too_low_sdram.py
index 14a09a3c11..52ba3a3ff3 100644
--- a/spynnaker_integration_tests/test_auto_pause_and_resume_tests/too_little_memory/test_synfire_1_too_low_sdram.py
+++ b/spynnaker_integration_tests/test_auto_pause_and_resume_tests/too_little_memory/test_synfire_1_too_low_sdram.py
@@ -19,11 +19,11 @@
import pytest
from spinnaker_testbase import BaseTestCase
from spynnaker_integration_tests.scripts import SynfireRunner
-from pacman.exceptions import PacmanPartitionException
+from pacman.exceptions import PacmanException
n_neurons = 200 # number of neurons in each population
runtime = 3000
-neurons_per_core = n_neurons / 2
+neurons_per_core = 1
synfire_run = SynfireRunner()
@@ -33,7 +33,7 @@ class TestTooLow(BaseTestCase):
"""
def test_too_low(self):
- with pytest.raises(PacmanPartitionException):
+ with pytest.raises(PacmanException):
synfire_run.do_run(n_neurons, neurons_per_core=neurons_per_core,
run_times=[runtime])
diff --git a/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/spynnaker.cfg b/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/spynnaker.cfg
index b491ffb6bb..063a67b895 100644
--- a/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/spynnaker.cfg
+++ b/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/spynnaker.cfg
@@ -1,7 +1,7 @@
[Reports]
extract_iobuf = True
extract_iobuf_from_cores = None
-extract_iobuf_from_binary_types = IF_curr_exp.aplx
+extract_iobuf_from_binary_types = reverse_iptag_multicast_source.aplx
extract_iobuf_during_run = True
clear_iobuf_during_run = True
diff --git a/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/test_only_binaries_recording.py b/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/test_only_binaries_recording.py
index ecacdedcfa..8e99d77086 100644
--- a/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/test_only_binaries_recording.py
+++ b/spynnaker_integration_tests/test_iobuf/test_only_binaries_recorded/test_only_binaries_recording.py
@@ -25,11 +25,11 @@ class TestCoresAndBinariesRecording(BaseTestCase):
def do_run(self):
sim.setup(timestep=1.0)
- sim.set_number_of_neurons_per_core(sim.IF_curr_exp, 100)
+ sim.set_number_of_neurons_per_core(sim.SpikeSourceArray, 1)
- input = sim.Population(1, sim.SpikeSourceArray(spike_times=[0]),
+ input = sim.Population(10, sim.SpikeSourceArray(spike_times=[0]),
label="input")
- pop_1 = sim.Population(200, sim.IF_curr_exp(), label="pop_1")
+ pop_1 = sim.Population(100, sim.IF_curr_exp(), label="pop_1")
sim.Projection(input, pop_1, sim.AllToAllConnector(),
synapse_type=sim.StaticSynapse(weight=5, delay=18))
sim.run(500)
@@ -38,7 +38,7 @@ def do_run(self):
placements = globals_variables.get_simulator()._placements
sim.end()
- machine_verts = pop_1._vertex.machine_vertices
+ machine_verts = input._vertex.machine_vertices
data = set()
false_data = list()
diff --git a/spynnaker_integration_tests/test_iobuf/test_only_cores_recorded/test_only_core_recording.py b/spynnaker_integration_tests/test_iobuf/test_only_cores_recorded/test_only_core_recording.py
index e34fa422a2..f9bb7c7b39 100644
--- a/spynnaker_integration_tests/test_iobuf/test_only_cores_recorded/test_only_core_recording.py
+++ b/spynnaker_integration_tests/test_iobuf/test_only_cores_recorded/test_only_core_recording.py
@@ -31,12 +31,10 @@ def do_run(self):
1, sim.SpikeSourceArray(spike_times=[0]), label="input1")
input2 = sim.Population(
1, sim.SpikeSourceArray(spike_times=[0]), label="input2")
- pop_1 = sim.Population(5, sim.IF_curr_exp(), label="pop_1")
- pop_2 = sim.Population(5, sim.IF_curr_exp(), label="pop_2")
- sim.Projection(input1, pop_1, sim.AllToAllConnector(),
- synapse_type=sim.StaticSynapse(weight=5, delay=1))
- sim.Projection(input2, pop_2, sim.AllToAllConnector(),
- synapse_type=sim.StaticSynapse(weight=5, delay=1))
+ input3 = sim.Population(
+ 1, sim.SpikeSourceArray(spike_times=[0]), label="input3")
+ input4 = sim.Population(
+ 1, sim.SpikeSourceArray(spike_times=[0]), label="input4")
# Make sure there is stuff at the cores specified in the cfg file
input1.set_constraint(
@@ -45,10 +43,10 @@ def do_run(self):
ChipAndCoreConstraint(0, 0, 3))
# While there must be a chip 0,0 chip 1,1 could be missing
if machine.is_chip_at(1, 1):
- pop_1.set_constraint(
+ input3.set_constraint(
ChipAndCoreConstraint(1, 1, 1))
# Make sure there is stuff at a core not specified in the cfg file
- pop_2.set_constraint(
+ input4.set_constraint(
ChipAndCoreConstraint(0, 0, 2))
sim.run(500)
diff --git a/spynnaker_integration_tests/test_master_pop/key_constraint_adder.py b/spynnaker_integration_tests/test_master_pop/key_constraint_adder.py
index 47eb1ba131..e96d999264 100644
--- a/spynnaker_integration_tests/test_master_pop/key_constraint_adder.py
+++ b/spynnaker_integration_tests/test_master_pop/key_constraint_adder.py
@@ -20,12 +20,15 @@
ReverseIPTagMulticastSourceMachineVertex)
from spynnaker.pyNN.models.utility_models.delays import (
DelayExtensionMachineVertex)
+from pacman.model.graphs.machine import MulticastEdgePartition
class KeyConstraintAdder(object):
def __call__(self, machine_graph):
for outgoing_partition in machine_graph.outgoing_edge_partitions:
+ if not isinstance(outgoing_partition, MulticastEdgePartition):
+ continue
mac_vertex = outgoing_partition.pre_vertex
if isinstance(mac_vertex,
ReverseIPTagMulticastSourceMachineVertex):
diff --git a/spynnaker_integration_tests/test_onchip_compressor/many_bitfields.py b/spynnaker_integration_tests/test_onchip_compressor/many_bitfields.py
index b855163a32..c04822b3d5 100644
--- a/spynnaker_integration_tests/test_onchip_compressor/many_bitfields.py
+++ b/spynnaker_integration_tests/test_onchip_compressor/many_bitfields.py
@@ -16,6 +16,8 @@
from unittest import SkipTest
from spynnaker.pyNN.exceptions import ConfigurationException
import spynnaker8 as sim
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexSlice)
def find_good_chip(machine, n_target):
@@ -56,8 +58,10 @@ def do_bitfield_run():
label="source_{}".format(s)))
targets = []
for t in range(n_target):
- pop = sim.Population(n_neurons, sim.IF_curr_exp(),
- label="target_{}".format(t))
+ pop = sim.Population(
+ n_neurons, sim.IF_curr_exp(), label="target_{}".format(t),
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
pop.add_placement_constraint(x=target_x, y=target_y)
targets.append(pop)
diff --git a/spynnaker_integration_tests/test_onchip_compressor/many_routes.py b/spynnaker_integration_tests/test_onchip_compressor/many_routes.py
index 39829af9e9..4ef481c57e 100644
--- a/spynnaker_integration_tests/test_onchip_compressor/many_routes.py
+++ b/spynnaker_integration_tests/test_onchip_compressor/many_routes.py
@@ -16,6 +16,8 @@
from unittest import SkipTest
from spynnaker.pyNN.exceptions import ConfigurationException
import spynnaker8 as sim
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexSlice)
def find_good_chip(machine, n_target):
@@ -52,12 +54,16 @@ def do_run():
sources = []
for s in range(n_source):
- sources.append(sim.Population(n_neurons, sim.IF_curr_exp(),
- label="source_{}".format(s)))
+ sources.append(sim.Population(
+ n_neurons, sim.IF_curr_exp(), label="source_{}".format(s),
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()}))
targets = []
for t in range(n_target):
- pop = sim.Population(n_neurons, sim.IF_curr_exp(),
- label="target_{}".format(t))
+ pop = sim.Population(
+ n_neurons, sim.IF_curr_exp(), label="target_{}".format(t),
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
pop.add_placement_constraint(x=target_x, y=target_y)
targets.append(pop)
diff --git a/spynnaker_integration_tests/test_onchip_compressor/one_route.py b/spynnaker_integration_tests/test_onchip_compressor/one_route.py
index 51e46115ff..aadbe7714d 100644
--- a/spynnaker_integration_tests/test_onchip_compressor/one_route.py
+++ b/spynnaker_integration_tests/test_onchip_compressor/one_route.py
@@ -16,6 +16,8 @@
from unittest import SkipTest
from spynnaker.pyNN.exceptions import ConfigurationException
import spynnaker8 as sim
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexSlice)
def find_good_chip(machine, n_target):
@@ -52,12 +54,16 @@ def do_one_run():
sources = []
for s in range(n_source):
- sources.append(sim.Population(n_neurons, sim.IF_curr_exp(),
- label="source_{}".format(s)))
+ sources.append(sim.Population(
+ n_neurons, sim.IF_curr_exp(), label="source_{}".format(s),
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()}))
targets = []
for t in range(n_target):
- pop = sim.Population(n_neurons, sim.IF_curr_exp(),
- label="target_{}".format(t))
+ pop = sim.Population(
+ n_neurons, sim.IF_curr_exp(), label="target_{}".format(t),
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
pop.add_placement_constraint(x=target_x, y=target_y)
targets.append(pop)
diff --git a/spynnaker_integration_tests/test_projection_param_retrieval_from_board/test_synfire_projection_on_same_chip.py b/spynnaker_integration_tests/test_projection_param_retrieval_from_board/test_synfire_projection_on_same_chip.py
index 3b6cdd3221..3083cd8d82 100644
--- a/spynnaker_integration_tests/test_projection_param_retrieval_from_board/test_synfire_projection_on_same_chip.py
+++ b/spynnaker_integration_tests/test_projection_param_retrieval_from_board/test_synfire_projection_on_same_chip.py
@@ -22,7 +22,7 @@
neurons_per_core = None
weight_to_spike = 1.0
delay = 1
-placement_constraint = (0, 0, 9)
+placement_constraint = (0, 0)
get_weights = True
get_delays = True
diff --git a/spynnaker_integration_tests/test_split_various/spynnaker.cfg b/spynnaker_integration_tests/test_split_various/spynnaker.cfg
new file mode 100644
index 0000000000..3ef3dab91a
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/spynnaker.cfg
@@ -0,0 +1,2 @@
+[Mode]
+violate_1ms_wall_clock_restriction = True
diff --git a/spynnaker_integration_tests/test_split_various/test_split_delays.py b/spynnaker_integration_tests/test_split_various/test_split_delays.py
new file mode 100644
index 0000000000..c850d326cd
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_delays.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import spynnaker8 as sim
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+import numpy
+from spinnaker_testbase import BaseTestCase
+
+
+def run_delayed_split():
+ sim.setup(0.1, time_scale_factor=1)
+ source = sim.Population(10, sim.SpikeSourceArray(spike_times=[0]))
+ target_1 = sim.Population(
+ 10, sim.IF_curr_exp(), label="target_1", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ target_1.record("spikes")
+ target_2 = sim.Population(
+ 10, sim.IF_curr_exp(), label="target_2", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(2)})
+ target_2.record("spikes")
+ target_3 = sim.Population(
+ 10, sim.IF_curr_exp(), label="target_3", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(3)})
+ target_3.record("spikes")
+ target_4 = sim.Population(
+ 10, sim.IF_curr_exp(), label="target_4", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(3)})
+ target_4.record("spikes")
+
+ # Try from list, which means host generated
+ from_list = sim.Projection(source, target_1, sim.FromListConnector(
+ [(a, a, 5, 0.1 + (a * 10)) for a in range(10)]))
+
+ # Also try a couple of machine generated options
+ fixed_prob = sim.Projection(
+ source, target_2, sim.FixedProbabilityConnector(0.1),
+ sim.StaticSynapse(weight=5.0, delay=34.0))
+
+ # Use power-of-two to check border case
+ fixed_total = sim.Projection(
+ source, target_3, sim.FixedTotalNumberConnector(10),
+ sim.StaticSynapse(weight=5.0, delay=2.0))
+
+ # Try from list with power-of-two delay to check border case
+ from_list_border = sim.Projection(source, target_4, sim.FromListConnector(
+ [(a, a, 5, 4.0) for a in range(10)]))
+
+ sim.run(100)
+
+ from_list_delays = list(
+ from_list.get("delay", "list", with_address=False))
+ fixed_prob_delays = list(
+ fixed_prob.get("delay", "list", with_address=False))
+ fixed_total_delays = list(
+ fixed_total.get("delay", "list", with_address=False))
+ from_list_border_delays = list(
+ from_list_border.get("delay", "list", with_address=False))
+
+ from_list_spikes = [
+ s.magnitude
+ for s in target_1.get_data("spikes").segments[0].spiketrains]
+ from_list_border_spikes = [
+ s.magnitude
+ for s in target_4.get_data("spikes").segments[0].spiketrains]
+
+ sim.end()
+
+ print(from_list_delays)
+ print(from_list_spikes)
+ print(fixed_prob_delays)
+ print(fixed_total_delays)
+ print(from_list_border_delays)
+ print(from_list_border_spikes)
+
+ # Check the delays worked out
+ assert(numpy.array_equal(from_list_delays,
+ [0.1 + (a * 10) for a in range(10)]))
+ assert(all(d == 34.0 for d in fixed_prob_delays))
+ assert(all(d == 2.0 for d in fixed_total_delays))
+ assert(all(d == 4.0 for d in from_list_border_delays))
+
+ for d, s in zip(from_list_delays, from_list_spikes):
+ assert(s > d)
+ for s in from_list_border_spikes:
+ assert(s > 4.0)
+
+
+class TestSplitDelays(BaseTestCase):
+
+ def test_run_simple_split(self):
+ self.runsafe(run_delayed_split)
+
+
+if __name__ == "__main__":
+ run_delayed_split()
diff --git a/spynnaker_integration_tests/test_split_various/test_split_impossible.py b/spynnaker_integration_tests/test_split_various/test_split_impossible.py
new file mode 100644
index 0000000000..5ae09be9fc
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_impossible.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import spynnaker8 as sim
+import pytest
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+from spynnaker.pyNN.exceptions import SynapticConfigurationException
+from spinnaker_testbase import BaseTestCase
+
+
+def mission_impossible():
+ sim.setup(0.1, time_scale_factor=1)
+
+ # Can't do that many neurons and delays together
+ sim.Population(128, sim.IF_curr_exp(), additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(
+ 1, 128, False)})
+
+ with pytest.raises(SynapticConfigurationException):
+ sim.run(100)
+
+
+def mission_impossible_2():
+ sim.setup(0.1, time_scale_factor=1)
+
+ # Can't do structural on multiple synapse cores
+ source = sim.Population(1, sim.SpikeSourcePoisson(rate=10))
+ pop = sim.Population(128, sim.IF_curr_exp(), additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(
+ 2)})
+
+ sim.Projection(source, pop, sim.FromListConnector([]),
+ sim.StructuralMechanismStatic(
+ sim.RandomSelection(), sim.DistanceDependentFormation(),
+ sim.RandomByWeightElimination(0.5)))
+
+ with pytest.raises(SynapticConfigurationException):
+ sim.run(100)
+
+
+class TestSplitImpossible(BaseTestCase):
+
+ def test_mission_impossible(self):
+ self.runsafe(mission_impossible)
+
+ def test_mission_impossible_2(self):
+ self.runsafe(mission_impossible_2)
diff --git a/spynnaker_integration_tests/test_split_various/test_split_simple.py b/spynnaker_integration_tests/test_split_various/test_split_simple.py
new file mode 100644
index 0000000000..78a796a953
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_simple.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2021 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+import spynnaker8 as sim
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterPoissonDelegate, SplitterAbstractPopulationVertexNeuronsSynapses)
+from pacman.model.partitioner_splitters import SplitterSliceLegacy
+from spinnaker_testbase import BaseTestCase
+import numpy
+
+
+def run_simple_split():
+ sim.setup(0.1, time_scale_factor=1)
+ sim.set_number_of_neurons_per_core(sim.IF_curr_exp, 16)
+ # Note, this next one is ignored on one-to-one Poisson sources
+ sim.set_number_of_neurons_per_core(sim.SpikeSourcePoisson, 10)
+
+ one_to_one_source = sim.Population(
+ 50, sim.SpikeSourcePoisson(rate=10000), additional_parameters={
+ "seed": 0,
+ "splitter": SplitterPoissonDelegate()})
+ rand_source = sim.Population(
+ 50, sim.SpikeSourcePoisson(rate=10), additional_parameters={
+ "seed": 1,
+ "splitter": SplitterSliceLegacy()})
+ rand_source.record("spikes")
+ target = sim.Population(
+ 50, sim.IF_curr_exp(), additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(3)})
+ target.record(["spikes", "packets-per-timestep"])
+ sim.Projection(
+ one_to_one_source, target, sim.OneToOneConnector(),
+ sim.StaticSynapse(weight=0.01))
+ sim.Projection(
+ rand_source, target, sim.OneToOneConnector(),
+ sim.StaticSynapse(weight=2.0))
+
+ sim.run(1000)
+
+ source_spikes = [
+ s.magnitude
+ for s in rand_source.get_data("spikes").segments[0].spiketrains]
+ target_spikes = [
+ s.magnitude
+ for s in target.get_data("spikes").segments[0].spiketrains]
+ target_ppts = (numpy.nonzero(numpy.sum([
+ s.magnitude
+ for s in target.get_data("packets-per-timestep").segments[0].filter(
+ name='packets-per-timestep')[0]], axis=1))[0] - 1) / 10
+
+ sim.end()
+
+ # The only actual spikes received should be from the random source
+ all_source_spikes = numpy.unique(numpy.sort(numpy.concatenate(
+ source_spikes)))
+ assert(numpy.allclose(all_source_spikes, target_ppts))
+
+ # A target spike should be caused by a source spike (though not all sources
+ # will cause a target spike)
+ for s, t in zip(source_spikes, target_spikes):
+ assert(len(t) <= len(s))
+
+
+class TestSplitSimple(BaseTestCase):
+
+ def test_run_simple_split(self):
+ self.runsafe(run_simple_split)
+
+
+if __name__ == "__main__":
+ run_simple_split()
diff --git a/spynnaker_integration_tests/test_split_various/test_split_stdp.py b/spynnaker_integration_tests/test_split_various/test_split_stdp.py
new file mode 100644
index 0000000000..cdc6dea8cc
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_stdp.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import spynnaker8 as p
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ calculate_spike_pair_additive_stdp_weight)
+from spinnaker_testbase import BaseTestCase
+
+import numpy
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+
+
+def split_potentiation_and_depression():
+ p.setup(1.0)
+ runtime = 100
+ initial_run = 1000 # to negate any initial conditions
+
+ # STDP parameters
+ a_plus = 0.01
+ a_minus = 0.01
+ tau_plus = 20
+ tau_minus = 20
+ plastic_delay = 3
+ initial_weight = 2.5
+ max_weight = 5
+ min_weight = 0
+
+ pre_spikes = [10, 50]
+ extra_spikes = [30]
+
+ for i in range(len(pre_spikes)):
+ pre_spikes[i] += initial_run
+
+ for i in range(len(extra_spikes)):
+ extra_spikes[i] += initial_run
+
+ # Spike source to send spike via plastic synapse
+ pre_pop = p.Population(1, p.SpikeSourceArray,
+ {'spike_times': pre_spikes}, label="pre")
+
+ # Spike source to send spike via static synapse to make
+ # post-plastic-synapse neuron fire
+ extra_pop = p.Population(1, p.SpikeSourceArray,
+ {'spike_times': extra_spikes}, label="extra")
+
+ # Post-plastic-synapse population
+ post_pop = p.Population(
+ 1, p.IF_curr_exp(), label="post", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+
+ # Create projections
+ p.Projection(
+ pre_pop, post_pop, p.OneToOneConnector(),
+ p.StaticSynapse(weight=5.0, delay=1), receptor_type="excitatory")
+
+ p.Projection(
+ extra_pop, post_pop, p.OneToOneConnector(),
+ p.StaticSynapse(weight=5.0, delay=1), receptor_type="excitatory")
+
+ syn_plas = p.STDPMechanism(
+ timing_dependence=p.SpikePairRule(tau_plus=tau_plus,
+ tau_minus=tau_minus,
+ A_plus=a_plus, A_minus=a_minus),
+ weight_dependence=p.AdditiveWeightDependence(w_min=min_weight,
+ w_max=max_weight),
+ weight=initial_weight, delay=plastic_delay)
+
+ plastic_synapse = p.Projection(pre_pop, post_pop,
+ p.OneToOneConnector(),
+ synapse_type=syn_plas,
+ receptor_type='excitatory')
+
+ # Record the spikes
+ post_pop.record("spikes")
+
+ # Run
+ p.run(initial_run + runtime)
+
+ # Get the weights
+ weights = plastic_synapse.get('weight', 'list',
+ with_address=False)
+
+ # Get the spikes
+ post_spikes = numpy.array(
+ # pylint: disable=no-member
+ post_pop.get_data('spikes').segments[0].spiketrains[0].magnitude)
+
+ # End the simulation as all information gathered
+ p.end()
+
+ new_weight_exact = calculate_spike_pair_additive_stdp_weight(
+ pre_spikes, post_spikes, initial_weight, plastic_delay, max_weight,
+ a_plus, a_minus, tau_plus, tau_minus)
+
+ print("Pre neuron spikes at: {}".format(pre_spikes))
+ print("Post-neuron spikes at: {}".format(post_spikes))
+ target_spikes = [1014, 1032, 1053]
+ assert(all(s1 == s2
+ for s1, s2 in zip(list(post_spikes), target_spikes)))
+ print("New weight exact: {}".format(new_weight_exact))
+ print("New weight SpiNNaker: {}".format(weights))
+
+ assert(numpy.allclose(weights, new_weight_exact, rtol=0.001))
+
+
+class TestSTDPPairAdditive(BaseTestCase):
+
+ def test_split_potentiation_and_depression(self):
+ self.runsafe(split_potentiation_and_depression)
diff --git a/spynnaker_integration_tests/test_split_various/test_split_struct.py b/spynnaker_integration_tests/test_split_various/test_split_struct.py
new file mode 100644
index 0000000000..9de4220394
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_struct.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from spinnaker_testbase import BaseTestCase
+import spynnaker8 as p
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+
+
+def split_structural_without_stdp():
+ p.setup(1.0)
+ stim = p.Population(1, p.SpikeSourceArray(range(10)), label="stim")
+
+ # These populations should experience formation
+ pop = p.Population(
+ 1, p.IF_curr_exp(), label="pop", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop_2 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_2", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+
+ # These populations should experience elimination
+ pop_3 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_3", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop_4 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_4", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+
+ # Formation with last-neuron selection (0 probability elimination)
+ proj = p.Projection(
+ stim, pop, p.FromListConnector([]), p.StructuralMechanismStatic(
+ partner_selection=p.LastNeuronSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 1.0),
+ elimination=p.RandomByWeightElimination(2.0, 0, 0),
+ f_rew=1000, initial_weight=2.0, initial_delay=5.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+
+ # Formation with random selection (0 probability elimination)
+ proj_2 = p.Projection(
+ stim, pop_2, p.FromListConnector([]), p.StructuralMechanismStatic(
+ partner_selection=p.RandomSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 1.0),
+ elimination=p.RandomByWeightElimination(4.0, 0, 0),
+ f_rew=1000, initial_weight=4.0, initial_delay=3.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+
+ # Elimination with last neuron selection (0 probability formation)
+ proj_3 = p.Projection(
+ stim, pop_3, p.FromListConnector([(0, 0)]),
+ p.StructuralMechanismStatic(
+ partner_selection=p.LastNeuronSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 0.0),
+ elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),
+ f_rew=1000, initial_weight=2.0, initial_delay=5.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+
+ # Elimination with random selection (0 probability formation)
+ proj_4 = p.Projection(
+ stim, pop_4, p.FromListConnector([(0, 0)]),
+ p.StructuralMechanismStatic(
+ partner_selection=p.RandomSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 0.0),
+ elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),
+ f_rew=1000, initial_weight=4.0, initial_delay=3.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+ p.run(10)
+
+ # Get the final connections
+ conns = list(proj.get(["weight", "delay"], "list"))
+ conns_2 = list(proj_2.get(["weight", "delay"], "list"))
+ conns_3 = list(proj_3.get(["weight", "delay"], "list"))
+ conns_4 = list(proj_4.get(["weight", "delay"], "list"))
+
+ p.end()
+
+ print(conns)
+ print(conns_2)
+ print(conns_3)
+ print(conns_4)
+
+ # These should be formed with specified parameters
+ assert(len(conns) == 1)
+ assert(tuple(conns[0]) == (0, 0, 2.0, 5.0))
+ assert(len(conns_2) == 1)
+ assert(tuple(conns_2[0]) == (0, 0, 4.0, 3.0))
+
+ # These should have no connections since eliminated
+ assert(len(conns_3) == 0)
+ assert(len(conns_4) == 0)
+
+
+class TestStructuralWithoutSTDP(BaseTestCase):
+
+ def test_split_structural_without_stdp(self):
+ self.runsafe(split_structural_without_stdp)
diff --git a/spynnaker_integration_tests/test_split_various/test_split_struct_stdp.py b/spynnaker_integration_tests/test_split_various/test_split_struct_stdp.py
new file mode 100644
index 0000000000..adac7fc4df
--- /dev/null
+++ b/spynnaker_integration_tests/test_split_various/test_split_struct_stdp.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+from spynnaker.pyNN.models.neuron.synapse_dynamics import (
+ calculate_spike_pair_additive_stdp_weight)
+from spinnaker_testbase import BaseTestCase
+import spynnaker8 as p
+import numpy
+from spynnaker.pyNN.extra_algorithms.splitter_components import (
+ SplitterAbstractPopulationVertexNeuronsSynapses)
+
+
+def split_structural_with_stdp():
+ p.setup(1.0)
+ pre_spikes = numpy.array(range(0, 10, 2))
+ pre_spikes_last_neuron = pre_spikes[pre_spikes > 0]
+ A_plus = 0.01
+ A_minus = 0.01
+ tau_plus = 20.0
+ tau_minus = 20.0
+ w_min = 0.0
+ w_max = 5.0
+ w_init_1 = 5.0
+ delay_1 = 2.0
+ w_init_2 = 4.0
+ delay_2 = 1.0
+ stim = p.Population(1, p.SpikeSourceArray(pre_spikes), label="stim")
+ pop = p.Population(
+ 1, p.IF_curr_exp(), label="pop", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop_2 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_2", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop_3 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_3", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop_4 = p.Population(
+ 1, p.IF_curr_exp(), label="pop_4", additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexNeuronsSynapses(1)})
+ pop.record("spikes")
+ pop_2.record("spikes")
+ proj = p.Projection(
+ stim, pop, p.FromListConnector([]), p.StructuralMechanismSTDP(
+ partner_selection=p.LastNeuronSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 1.0),
+ elimination=p.RandomByWeightElimination(2.0, 0, 0),
+ timing_dependence=p.SpikePairRule(
+ tau_plus, tau_minus, A_plus, A_minus),
+ weight_dependence=p.AdditiveWeightDependence(w_min, w_max),
+ f_rew=1000, initial_weight=w_init_1, initial_delay=delay_1,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+ proj_2 = p.Projection(
+ stim, pop_2, p.FromListConnector([]), p.StructuralMechanismSTDP(
+ partner_selection=p.RandomSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 1.0),
+ elimination=p.RandomByWeightElimination(4.0, 0, 0),
+ timing_dependence=p.SpikePairRule(
+ tau_plus, tau_minus, A_plus, A_minus),
+ weight_dependence=p.AdditiveWeightDependence(w_min, w_max),
+ f_rew=1000, initial_weight=w_init_2, initial_delay=delay_2,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+ proj_3 = p.Projection(
+ stim, pop_3, p.FromListConnector([(0, 0)]),
+ p.StructuralMechanismSTDP(
+ partner_selection=p.LastNeuronSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 0.0),
+ elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),
+ timing_dependence=p.SpikePairRule(
+ tau_plus, tau_minus, A_plus, A_minus),
+ weight_dependence=p.AdditiveWeightDependence(w_min, w_max),
+ f_rew=1000, initial_weight=2.0, initial_delay=5.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+ proj_4 = p.Projection(
+ stim, pop_4, p.FromListConnector([(0, 0)]),
+ p.StructuralMechanismSTDP(
+ partner_selection=p.RandomSelection(),
+ formation=p.DistanceDependentFormation([1, 1], 0.0),
+ elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),
+ timing_dependence=p.SpikePairRule(
+ tau_plus, tau_minus, A_plus, A_minus),
+ weight_dependence=p.AdditiveWeightDependence(w_min, w_max),
+ f_rew=1000, initial_weight=4.0, initial_delay=3.0,
+ s_max=1, seed=0, weight=0.0, delay=1.0))
+ p.run(10)
+
+ conns = list(proj.get(["weight", "delay"], "list"))
+ conns_2 = list(proj_2.get(["weight", "delay"], "list"))
+ conns_3 = list(proj_3.get(["weight", "delay"], "list"))
+ conns_4 = list(proj_4.get(["weight", "delay"], "list"))
+
+ spikes_1 = [s.magnitude
+ for s in pop.get_data("spikes").segments[0].spiketrains]
+ spikes_2 = [s.magnitude
+ for s in pop_2.get_data("spikes").segments[0].spiketrains]
+
+ p.end()
+
+ print(conns)
+ print(conns_2)
+ print(conns_3)
+ print(conns_4)
+
+ w_final_1 = calculate_spike_pair_additive_stdp_weight(
+ pre_spikes_last_neuron, spikes_1[0], w_init_1, delay_1, w_max,
+ A_plus, A_minus, tau_plus, tau_minus)
+ w_final_2 = calculate_spike_pair_additive_stdp_weight(
+ pre_spikes, spikes_2[0], w_init_2, delay_2, w_max, A_plus, A_minus,
+ tau_plus, tau_minus)
+ print(w_final_1, spikes_1[0])
+ print(w_final_2, spikes_2[0])
+
+ assert(len(conns) == 1)
+ assert(conns[0][3] == delay_1)
+ assert(conns[0][2] >= w_final_1 - 0.01 and
+ conns[0][2] <= w_final_1 + 0.01)
+ assert(len(conns_2) == 1)
+ assert(conns_2[0][3] == delay_2)
+ assert(conns_2[0][2] >= w_final_2 - 0.01 and
+ conns_2[0][2] <= w_final_2 + 0.01)
+ assert(len(conns_3) == 0)
+ assert(len(conns_4) == 0)
+
+
+class TestStructuralWithSTDP(BaseTestCase):
+
+ def test_split_structural_with_stdp(self):
+ self.runsafe(split_structural_with_stdp)
diff --git a/spynnaker_integration_tests/test_various/test_spike_io_multi_run.py b/spynnaker_integration_tests/test_various/test_spike_io_multi_run.py
index 3f2d2cb599..78dd745a8a 100644
--- a/spynnaker_integration_tests/test_various/test_spike_io_multi_run.py
+++ b/spynnaker_integration_tests/test_various/test_spike_io_multi_run.py
@@ -60,6 +60,7 @@ def receive_spikes(label, time, neuron_ids):
def do_run():
+ random.seed(0)
# initial call to set up the front end (pynn requirement)
Frontend.setup(timestep=1.0, min_delay=1.0, max_delay=144.0)
diff --git a/unittests/model_tests/neuron/test_synapse_io.py b/unittests/model_tests/neuron/test_synapse_io.py
index 52ec3d2eb5..9264af0886 100644
--- a/unittests/model_tests/neuron/test_synapse_io.py
+++ b/unittests/model_tests/neuron/test_synapse_io.py
@@ -19,9 +19,7 @@
ProjectionApplicationEdge, SynapseInformation)
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
SynapseDynamicsStatic, SynapseDynamicsSTDP)
-from spynnaker.pyNN.models.neuron.master_pop_table import (
- MasterPopTableAsBinarySearch)
-from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased
+from spynnaker.pyNN.models.neuron.synapse_io import _get_allowed_row_length
from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence import (
WeightDependenceAdditive)
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import (
@@ -57,17 +55,13 @@ def test_get_allowed_row_length(
dynamics = dynamics_class(timing(), weight())
else:
dynamics = dynamics_class()
- io = SynapseIORowBased()
- population_table = MasterPopTableAsBinarySearch()
synapse_information = SynapseInformation(
None, None, None, False, False, None, None, dynamics, 0, True)
in_edge = ProjectionApplicationEdge(None, None, synapse_information)
if exception is not None:
with pytest.raises(exception) as exc_info:
- io._get_allowed_row_length(
- size, dynamics, population_table, in_edge, size)
+ _get_allowed_row_length(size, dynamics, in_edge, size)
assert exc_info.value.max_size == max_size
else:
- actual_size = io._get_allowed_row_length(
- size, dynamics, population_table, in_edge, size)
+ actual_size = _get_allowed_row_length(size, dynamics, in_edge, size)
assert actual_size == max_size
diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py
index cec230332c..7e1426af6f 100644
--- a/unittests/model_tests/neuron/test_synaptic_manager.py
+++ b/unittests/model_tests/neuron/test_synaptic_manager.py
@@ -13,7 +13,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import io
-import math
import shutil
import struct
import tempfile
@@ -21,27 +20,23 @@
import numpy
import pytest
-from spinn_utilities.overrides import overrides
from spinn_machine import SDRAM
+from spinn_machine.virtual_machine import virtual_machine
+from spinn_utilities.overrides import overrides
+from spinn_utilities.config_holder import load_config
from spinnman.model import CPUInfo
from spinnman.transceiver import Transceiver
-from pacman.model.placements import Placement, Placements
-from pacman.model.graphs.common import Slice
-from pacman.model.graphs.machine import (MachineGraph, MachineEdge)
-from pacman.model.routing_info import (
- RoutingInfo, PartitionRoutingInfo, BaseKeyAndMask)
-from pacman.model.graphs.application import ApplicationGraph
-from pacman.model.partitioner_splitters import SplitterSliceLegacy
-from spinn_utilities.config_holder import set_config
+from pacman.model.placements import Placement
+from pacman.executor.injection_decorator import injection_context
+from pacman.operations.routing_info_allocator_algorithms import (
+ ZonedRoutingInfoAllocator)
from data_specification import (
DataSpecificationGenerator, DataSpecificationExecutor)
from data_specification.constants import MAX_MEM_REGIONS
-from spynnaker.pyNN.models.neuron import SynapticManager
-from spynnaker.pyNN.models.neural_projections import (
- ProjectionApplicationEdge, SynapseInformation, DelayedApplicationEdge)
-from spynnaker.pyNN.models.neural_projections.connectors import (
- OneToOneConnector, AllToAllConnector,
- FromListConnector)
+from spinn_front_end_common.utilities import globals_variables
+from spinn_front_end_common.interface.interface_functions import (
+ EdgeToNKeysMapper)
+from spynnaker.pyNN.models.neuron.synaptic_matrices import SynapticMatrices
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
SynapseDynamicsStatic, SynapseDynamicsStructuralSTDP,
SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic)
@@ -50,23 +45,20 @@
from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence import (
WeightDependenceAdditive, WeightDependenceMultiplicative)
from spynnaker.pyNN.models.neuron.structural_plasticity.synaptogenesis\
- .partner_selection import (
- LastNeuronSelection, RandomSelection)
+ .partner_selection import (LastNeuronSelection, RandomSelection)
from spynnaker.pyNN.models.neuron.structural_plasticity.synaptogenesis\
- .formation import (
- DistanceDependentFormation)
+ .formation import DistanceDependentFormation
from spynnaker.pyNN.models.neuron.structural_plasticity.synaptogenesis\
- .elimination import (
- RandomByWeightElimination)
+ .elimination import RandomByWeightElimination
from spynnaker.pyNN.exceptions import SynapticConfigurationException
-from spynnaker.pyNN.models.utility_models.delays import (
- DelayExtensionVertex, DelayExtensionMachineVertex)
-from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS
+from spynnaker.pyNN.models.neuron.builds.if_curr_exp_base import IFCurrExpBase
from spynnaker.pyNN.extra_algorithms.splitter_components import (
- SplitterDelayVertexSlice, AbstractSpynnakerSplitterDelay)
-from pacman_test_objects import SimpleTestVertex
-from unittests.mocks import MockPopulation
-import spynnaker8
+ SplitterAbstractPopulationVertexSlice, SpynnakerSplitterPartitioner)
+from spynnaker.pyNN.extra_algorithms import DelaySupportAdder
+from spynnaker.pyNN.models.neural_projections.connectors import (
+ AbstractGenerateConnectorOnMachine)
+from spynnaker.pyNN.config_setup import unittest_setup
+import spynnaker8 as p
class MockTransceiverRawData(object):
@@ -88,127 +80,124 @@ def read_word(self, x, y, base_address, cpu=0):
return datum
-class MockSplitter(SplitterSliceLegacy, AbstractSpynnakerSplitterDelay):
- def __init__(self):
- super().__init__("mock splitter")
+def say_false(self, weights, delays):
+ return False
def test_write_data_spec():
- spynnaker8.setup(timestep=1)
- # Add an sdram so max SDRAM is high enough
- SDRAM(10000)
-
- set_config("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)
-
- placements = Placements()
- pre_app_population = MockPopulation(10, "mock pop pre")
- pre_app_vertex = SimpleTestVertex(10, label="pre")
- pre_app_vertex.splitter = MockSplitter()
- pre_app_vertex.splitter._called = True
- pre_vertex_slice = Slice(0, 9)
-
- post_app_population = MockPopulation(10, "mock pop post")
- pre_vertex = pre_app_vertex.create_machine_vertex(
- pre_vertex_slice, None)
- placements.add_placement(Placement(pre_vertex, 0, 0, 1))
- post_app_vertex = SimpleTestVertex(10, label="post")
- post_app_vertex.splitter = MockSplitter()
- post_app_vertex.splitter._called = True
- post_vertex_slice = Slice(0, 9)
- post_vertex = post_app_vertex.create_machine_vertex(
- post_vertex_slice, None)
- post_vertex_placement = Placement(post_vertex, 0, 0, 2)
- placements.add_placement(post_vertex_placement)
- delay_app_vertex = DelayExtensionVertex(
- 10, 16, 51, pre_app_vertex, label="delay")
- delay_app_vertex.set_new_n_delay_stages_and_delay_per_stage(
- 16, 51)
- delay_app_vertex.splitter = SplitterDelayVertexSlice(
- pre_app_vertex.splitter)
- delay_vertex = DelayExtensionMachineVertex(
- resources_required=None, label="", constraints=[],
- app_vertex=delay_app_vertex, vertex_slice=post_vertex_slice)
- placements.add_placement(Placement(delay_vertex, 0, 0, 3))
- one_to_one_connector_1 = OneToOneConnector(None)
- direct_synapse_information_1 = SynapseInformation(
- one_to_one_connector_1, pre_app_population, post_app_population,
- False, False, None, SynapseDynamicsStatic(), 0, True, 1.5, 1.0)
- one_to_one_connector_1.set_projection_information(
- direct_synapse_information_1)
- one_to_one_connector_2 = OneToOneConnector(None)
- direct_synapse_information_2 = SynapseInformation(
- one_to_one_connector_2, pre_app_population, post_app_population,
- False, False, None, SynapseDynamicsStatic(), 1, True, 2.5, 2.0)
- one_to_one_connector_2.set_projection_information(
- direct_synapse_information_2)
- all_to_all_connector = AllToAllConnector(False)
- all_to_all_synapse_information = SynapseInformation(
- all_to_all_connector, pre_app_population, post_app_population,
- False, False, None, SynapseDynamicsStatic(), 0, True, 4.5, 4.0)
- all_to_all_connector.set_projection_information(
- all_to_all_synapse_information)
+ unittest_setup()
+ # UGLY but the mock transceiver NEED generate_on_machine to be False
+ AbstractGenerateConnectorOnMachine.generate_on_machine = say_false
+ machine = virtual_machine(2, 2)
+
+ p.setup(1.0)
+ load_config()
+ p.set_number_of_neurons_per_core(p.IF_curr_exp, 100)
+ pre_pop = p.Population(
+ 10, p.IF_curr_exp(), label="Pre",
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
+ post_pop = p.Population(
+ 10, p.IF_curr_exp(), label="Post",
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
+ proj_one_to_one_1 = p.Projection(
+ pre_pop, post_pop, p.OneToOneConnector(),
+ p.StaticSynapse(weight=1.5, delay=1.0))
+ proj_one_to_one_2 = p.Projection(
+ pre_pop, post_pop, p.OneToOneConnector(),
+ p.StaticSynapse(weight=2.5, delay=2.0))
+ proj_all_to_all = p.Projection(
+ pre_pop, post_pop, p.AllToAllConnector(allow_self_connections=False),
+ p.StaticSynapse(weight=4.5, delay=4.0))
+
+ # spynnaker8.setup(timestep=1)
+ # # Add an sdram so max SDRAM is high enough
+ # SDRAM(10000)
+ #
+ # set_config("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)
+ #
+ # placements = Placements()
+ # pre_app_population = MockPopulation(10, "mock pop pre")
+ # pre_app_vertex = SimpleTestVertex(10, label="pre")
+ # pre_app_vertex.splitter = MockSplitter()
+ # pre_app_vertex.splitter._called = True
+ # pre_vertex_slice = Slice(0, 9)
+ #
+ # post_app_population = MockPopulation(10, "mock pop post")
+ # pre_vertex = pre_app_vertex.create_machine_vertex(
+ # pre_vertex_slice, None)
+ # placements.add_placement(Placement(pre_vertex, 0, 0, 1))
+ # post_app_vertex = SimpleTestVertex(10, label="post")
+ # post_app_vertex.splitter = MockSplitter()
+ # post_app_vertex.splitter._called = True
+ # post_vertex_slice = Slice(0, 9)
+ # post_vertex = post_app_vertex.create_machine_vertex(
+ # post_vertex_slice, None)
+ # post_vertex_placement = Placement(post_vertex, 0, 0, 2)
+ # placements.add_placement(post_vertex_placement)
+ # delay_app_vertex = DelayExtensionVertex(
+ # 10, 16, 51, pre_app_vertex, label="delay")
+ # delay_app_vertex.set_new_n_delay_stages_and_delay_per_stage(
+ # 16, 51)
+ # delay_app_vertex.splitter = SplitterDelayVertexSlice(
+ # pre_app_vertex.splitter)
+ # delay_vertex = DelayExtensionMachineVertex(
+ # resources_required=None, label="", constraints=[],
+ # app_vertex=delay_app_vertex, vertex_slice=post_vertex_slice)
+ # placements.add_placement(Placement(delay_vertex, 0, 0, 3))
+ # one_to_one_connector_1 = OneToOneConnector(None)
+ # direct_synapse_information_1 = SynapseInformation(
+ # one_to_one_connector_1, pre_app_population, post_app_population,
+ # False, False, None, SynapseDynamicsStatic(), 0, True, 1.5, 1.0)
+ # one_to_one_connector_1.set_projection_information(
+ # direct_synapse_information_1)
+ # one_to_one_connector_2 = OneToOneConnector(None)
+ # direct_synapse_information_2 = SynapseInformation(
+ # one_to_one_connector_2, pre_app_population, post_app_population,
+ # False, False, None, SynapseDynamicsStatic(), 1, True, 2.5, 2.0)
+ # one_to_one_connector_2.set_projection_information(
+ # direct_synapse_information_2)
+ # all_to_all_connector = AllToAllConnector(False)
+ # all_to_all_synapse_information = SynapseInformation(
+ # all_to_all_connector, pre_app_population, post_app_population,
+ # False, False, None, SynapseDynamicsStatic(), 0, True, 4.5, 4.0)
+ # all_to_all_connector.set_projection_information(
+ # all_to_all_synapse_information)
from_list_list = [(i, i, i, (i * 5) + 1) for i in range(10)]
- from_list_connector = FromListConnector(conn_list=from_list_list)
- from_list_synapse_information = SynapseInformation(
- from_list_connector, pre_app_population, post_app_population,
- False, False, None, SynapseDynamicsStatic(), 0, True)
- from_list_connector.set_projection_information(
- from_list_synapse_information)
- app_edge = ProjectionApplicationEdge(
- pre_app_vertex, post_app_vertex, direct_synapse_information_1)
- app_edge.add_synapse_information(direct_synapse_information_2)
- app_edge.add_synapse_information(all_to_all_synapse_information)
- app_edge.add_synapse_information(from_list_synapse_information)
- delay_edge = DelayedApplicationEdge(
- delay_app_vertex, post_app_vertex, direct_synapse_information_1,
- app_edge)
- delay_edge.add_synapse_information(direct_synapse_information_2)
- delay_edge.add_synapse_information(all_to_all_synapse_information)
- delay_edge.add_synapse_information(from_list_synapse_information)
- app_edge.delay_edge = delay_edge
- machine_edge = MachineEdge(
- pre_vertex, post_vertex, app_edge=app_edge)
- delay_machine_edge = MachineEdge(
- delay_vertex, post_vertex, app_edge=delay_edge)
- partition_name = "TestPartition"
-
- app_graph = ApplicationGraph("Test")
- graph = MachineGraph("Test", app_graph)
- graph.add_vertex(pre_vertex)
- graph.add_vertex(post_vertex)
- graph.add_vertex(delay_vertex)
- graph.add_edge(machine_edge, partition_name)
- graph.add_edge(delay_machine_edge, partition_name)
-
- app_graph.add_vertex(pre_app_vertex)
- app_graph.add_vertex(post_app_vertex)
- app_graph.add_vertex(delay_app_vertex)
- app_graph.add_edge(app_edge, partition_name)
- app_graph.add_edge(delay_edge, partition_name)
-
- routing_info = RoutingInfo()
- key = 0
- routing_info.add_partition_info(PartitionRoutingInfo(
- [BaseKeyAndMask(key, 0xFFFFFFF0)],
- graph.get_outgoing_edge_partition_starting_at_vertex(
- pre_vertex, partition_name)))
- delay_key = 0xF0
- delay_key_and_mask = BaseKeyAndMask(delay_key, 0xFFFFFFF0)
- delay_routing_info = PartitionRoutingInfo(
- [delay_key_and_mask],
- graph.get_outgoing_edge_partition_starting_at_vertex(
- delay_vertex, partition_name))
- routing_info.add_partition_info(delay_routing_info)
+ proj_from_list = p.Projection(
+ pre_pop, post_pop, p.FromListConnector(from_list_list),
+ p.StaticSynapse())
+
+ app_graph = globals_variables.get_simulator().original_application_graph
+ context = {
+ "MemoryApplicationGraph": app_graph
+ }
+ with (injection_context(context)):
+ delay_adder = DelaySupportAdder()
+ delay_adder.__call__(app_graph, 16.0)
+ partitioner = SpynnakerSplitterPartitioner()
+ machine_graph, _ = partitioner.__call__(app_graph, machine, 100)
+ allocator = ZonedRoutingInfoAllocator()
+ n_keys_mapper = EdgeToNKeysMapper()
+ n_keys_map = n_keys_mapper.__call__(machine_graph)
+ routing_info = allocator.__call__(
+ machine_graph, n_keys_map, flexible=False)
+
+ post_vertex = next(iter(post_pop._vertex.machine_vertices))
+ post_vertex_slice = post_vertex.vertex_slice
+ post_vertex_placement = Placement(post_vertex, 0, 0, 3)
temp_spec = tempfile.mktemp()
spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
- synaptic_manager.write_data_spec(
- spec, post_app_vertex, post_vertex_slice, post_vertex,
- graph, app_graph, routing_info, 1.0)
+ synaptic_matrices = SynapticMatrices(
+ post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
+ synaptic_matrix_region=1, direct_matrix_region=2, poptable_region=3,
+ connection_builder_region=4)
+ synaptic_matrices.write_synaptic_data(
+ spec, post_pop._vertex.incoming_projections, all_syn_block_sz=10000,
+ weight_scales=[32, 32], routing_info=routing_info)
spec.end_specification()
with io.FileIO(temp_spec, "rb") as spec_reader:
@@ -225,37 +214,44 @@ def test_write_data_spec():
transceiver = MockTransceiverRawData(all_data)
report_folder = mkdtemp()
try:
- connections_1 = synaptic_manager.get_connections_from_machine(
- transceiver, placements, app_edge,
- direct_synapse_information_1)
+ connections_1 = numpy.concatenate(
+ synaptic_matrices.get_connections_from_machine(
+ transceiver, post_vertex_placement,
+ proj_one_to_one_1._projection_edge,
+ proj_one_to_one_1._synapse_information))
# Check that all the connections have the right weight and delay
assert len(connections_1) == post_vertex_slice.n_atoms
assert all([conn["weight"] == 1.5 for conn in connections_1])
assert all([conn["delay"] == 1.0 for conn in connections_1])
- connections_2 = synaptic_manager.get_connections_from_machine(
- transceiver, placements, app_edge,
- direct_synapse_information_2)
+ connections_2 = numpy.concatenate(
+ synaptic_matrices.get_connections_from_machine(
+ transceiver, post_vertex_placement,
+ proj_one_to_one_2._projection_edge,
+ proj_one_to_one_2._synapse_information))
# Check that all the connections have the right weight and delay
assert len(connections_2) == post_vertex_slice.n_atoms
assert all([conn["weight"] == 2.5 for conn in connections_2])
assert all([conn["delay"] == 2.0 for conn in connections_2])
- connections_3 = synaptic_manager.get_connections_from_machine(
- transceiver, placements, app_edge,
- all_to_all_synapse_information)
+ connections_3 = numpy.concatenate(
+ synaptic_matrices.get_connections_from_machine(
+ transceiver, post_vertex_placement,
+ proj_all_to_all._projection_edge,
+ proj_all_to_all._synapse_information))
# Check that all the connections have the right weight and delay
- assert len(connections_3) == \
- post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
+ assert len(connections_3) == 100
assert all([conn["weight"] == 4.5 for conn in connections_3])
assert all([conn["delay"] == 4.0 for conn in connections_3])
- connections_4 = synaptic_manager.get_connections_from_machine(
- transceiver, placements, app_edge,
- from_list_synapse_information)
+ connections_4 = numpy.concatenate(
+ synaptic_matrices.get_connections_from_machine(
+ transceiver, post_vertex_placement,
+ proj_from_list._projection_edge,
+ proj_from_list._synapse_information))
# Check that all the connections have the right weight and delay
assert len(connections_4) == len(from_list_list)
@@ -268,10 +264,13 @@ def test_write_data_spec():
def test_set_synapse_dynamics():
- spynnaker8.setup()
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
+ unittest_setup()
+ p.setup(1.0)
+ post_app_model = IFCurrExpBase()
+ post_app_vertex = post_app_model.create_vertex(
+ n_neurons=10, label="post", constraints=None, spikes_per_second=None,
+ ring_buffer_sigma=None, incoming_spike_buffer_size=None,
+ n_steps_per_timestep=1, drop_late_spikes=True, splitter=None)
static = SynapseDynamicsStatic()
stdp = SynapseDynamicsSTDP(
@@ -308,270 +307,220 @@ def test_set_synapse_dynamics():
weight_dependence=WeightDependenceMultiplicative())
# This should be fine as it is the first call
- synaptic_manager.synapse_dynamics = static
+ post_app_vertex.synapse_dynamics = static
# This should be fine as STDP overrides static
- synaptic_manager.synapse_dynamics = stdp
+ post_app_vertex.synapse_dynamics = stdp
# This should fail because STDP dependences are difference
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp
+ post_app_vertex.synapse_dynamics = alt_stdp
# This should work because STDP dependences are the same
- synaptic_manager.synapse_dynamics = stdp
+ post_app_vertex.synapse_dynamics = stdp
# This should work because static always works, but the type should
# still be STDP
- synaptic_manager.synapse_dynamics = static
+ post_app_vertex.synapse_dynamics = static
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsSTDP)
# This should work but should merge with the STDP rule
- synaptic_manager.synapse_dynamics = static_struct
+ post_app_vertex.synapse_dynamics = static_struct
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
# These should work as static / the STDP is the same but neither should
# change anything
- synaptic_manager.synapse_dynamics = static
+ post_app_vertex.synapse_dynamics = static
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
- synaptic_manager.synapse_dynamics = stdp
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics = stdp
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
- synaptic_manager.synapse_dynamics = static_struct
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics = static_struct
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
# These should fail as things are different
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_static_struct
+ post_app_vertex.synapse_dynamics = alt_static_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp
+ post_app_vertex.synapse_dynamics = alt_stdp
# This should pass as same structural STDP
- synaptic_manager.synapse_dynamics = stdp_struct
+ post_app_vertex.synapse_dynamics = stdp_struct
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
# These should fail as both different
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct
+ post_app_vertex.synapse_dynamics = alt_stdp_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct_2
+ post_app_vertex.synapse_dynamics = alt_stdp_struct_2
# Try starting again to get a couple more combinations
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
+ post_app_vertex = post_app_model.create_vertex(
+ n_neurons=10, label="post", constraints=None, spikes_per_second=None,
+ ring_buffer_sigma=None, incoming_spike_buffer_size=None,
+ n_steps_per_timestep=1, drop_late_spikes=True, splitter=None)
# STDP followed by structural STDP should result in Structural STDP
- synaptic_manager.synapse_dynamics = stdp
- synaptic_manager.synapse_dynamics = stdp_struct
+ post_app_vertex.synapse_dynamics = stdp
+ post_app_vertex.synapse_dynamics = stdp_struct
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
# ... and should fail here because of differences
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp
+ post_app_vertex.synapse_dynamics = alt_stdp
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_static_struct
+ post_app_vertex.synapse_dynamics = alt_static_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct
+ post_app_vertex.synapse_dynamics = alt_stdp_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct_2
+ post_app_vertex.synapse_dynamics = alt_stdp_struct_2
# One more time!
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
+ post_app_vertex = post_app_model.create_vertex(
+ n_neurons=10, label="post", constraints=None, spikes_per_second=None,
+ ring_buffer_sigma=None, incoming_spike_buffer_size=None,
+ n_steps_per_timestep=1, drop_late_spikes=True, splitter=None)
# Static followed by static structural should result in static
# structural
- synaptic_manager.synapse_dynamics = static
- synaptic_manager.synapse_dynamics = static_struct
+ post_app_vertex.synapse_dynamics = static
+ post_app_vertex.synapse_dynamics = static_struct
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralStatic)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralStatic)
# ... and should fail here because of differences
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_static_struct
+ post_app_vertex.synapse_dynamics = alt_static_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct
+ post_app_vertex.synapse_dynamics = alt_stdp_struct
# This should be fine
- synaptic_manager.synapse_dynamics = static
+ post_app_vertex.synapse_dynamics = static
# This should be OK, but should merge with STDP (opposite of above)
- synaptic_manager.synapse_dynamics = stdp
+ post_app_vertex.synapse_dynamics = stdp
assert isinstance(
- synaptic_manager.synapse_dynamics, SynapseDynamicsStructuralSTDP)
+ post_app_vertex.synapse_dynamics, SynapseDynamicsStructuralSTDP)
# ... and now these should fail
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp
+ post_app_vertex.synapse_dynamics = alt_stdp
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_static_struct
+ post_app_vertex.synapse_dynamics = alt_static_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct
+ post_app_vertex.synapse_dynamics = alt_stdp_struct
with pytest.raises(SynapticConfigurationException):
- synaptic_manager.synapse_dynamics = alt_stdp_struct_2
+ post_app_vertex.synapse_dynamics = alt_stdp_struct_2
# OK, just one more, honest
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
- synaptic_manager.synapse_dynamics = static_struct
- synaptic_manager.synapse_dynamics = stdp_struct
+ post_app_vertex = post_app_model.create_vertex(
+ n_neurons=10, label="post", constraints=None, spikes_per_second=None,
+ ring_buffer_sigma=None, incoming_spike_buffer_size=None,
+ n_steps_per_timestep=1, drop_late_spikes=True, splitter=None)
+ post_app_vertex.synapse_dynamics = static_struct
+ post_app_vertex.synapse_dynamics = stdp_struct
@pytest.mark.parametrize(
"undelayed_indices_connected,delayed_indices_connected,n_pre_neurons,"
"neurons_per_core,expect_app_keys,max_delay", [
# Only undelayed, all edges exist
- (set(range(10)), None, 1000, 100, True, None),
+ (range(10), [], 1000, 100, True, None),
# Only delayed, all edges exist
- (None, set(range(10)), 1000, 100, True, 20),
+ ([], range(10), 1000, 100, True, 20),
# All undelayed and delayed edges exist
- (set(range(10)), set(range(10)), 1000, 100, True, 20),
+ (range(10), range(10), 1000, 100, True, 20),
# Only undelayed, some edges are filtered (app keys shouldn't work)
- ({0, 1, 2, 3, 4}, None, 1000, 100, False, None),
+ ([0, 1, 2, 3, 4], [], 1000, 100, False, None),
# Only delayed, some edges are filtered (app keys shouldn't work)
- (None, {5, 6, 7, 8, 9}, 1000, 100, False, 20),
+ ([], [5, 6, 7, 8, 9], 1000, 100, False, 20),
# Both delayed and undelayed, some undelayed edges don't exist
- ({3, 4, 5, 6, 7}, set(range(10)), 1000, 100, False, 20),
+ # (app keys work because undelayed aren't filtered)
+ ([3, 4, 5, 6, 7], range(10), 1000, 100, True, 20),
# Both delayed and undelayed, some delayed edges don't exist
- (set(range(10)), {4, 5, 6, 7}, 1000, 100, False, 20),
+ # (app keys work because all undelayed exist)
+ (range(10), [4, 5, 6, 7], 1000, 100, True, 20),
# Should work but number of neurons don't work out
- (set(range(5)), None, 10000, 2048, False, None),
+ (range(5), [], 10000, 2048, False, None),
# Should work but number of cores doesn't work out
- (set(range(2000)), None, 10000, 5, False, None),
+ (range(2000), [], 10000, 5, False, None),
# Should work but number of neurons with delays don't work out
- (None, set(range(4)), 1024, 256, False, 144)
+ ([], range(4), 1024, 256, False, 144)
])
def test_pop_based_master_pop_table_standard(
undelayed_indices_connected, delayed_indices_connected,
n_pre_neurons, neurons_per_core, expect_app_keys, max_delay):
- spynnaker8.setup(1.0)
-
- # Add an sdram so max SDRAM is high enough
- SDRAM(128000000)
+ unittest_setup()
+ machine = virtual_machine(12, 12)
+
+ # Build a from list connector with the delays we want
+ connections = []
+ connections.extend([(i * neurons_per_core + j, j, 0, 10)
+ for i in undelayed_indices_connected
+ for j in range(100)])
+ connections.extend([(i * neurons_per_core + j, j, 0, max_delay)
+ for i in delayed_indices_connected
+ for j in range(100)])
# Make simple source and target, where the source has 1000 atoms
# split into 10 vertices (100 each) and the target has 100 atoms in
# a single vertex
- app_graph = ApplicationGraph("Test")
- mac_graph = MachineGraph("Test", app_graph)
- pre_app_vertex = SimpleTestVertex(
- n_pre_neurons, max_atoms_per_core=neurons_per_core)
- pre_app_vertex.splitter = MockSplitter()
- app_graph.add_vertex(pre_app_vertex)
- post_vertex_slice = Slice(0, 99)
- post_app_vertex = SimpleTestVertex(100)
- post_app_vertex.splitter = MockSplitter()
- app_graph.add_vertex(post_app_vertex)
- post_mac_vertex = post_app_vertex.create_machine_vertex(
- post_vertex_slice, None)
- mac_graph.add_vertex(post_mac_vertex)
-
- # Create the pre-machine-vertices
- for lo in range(0, n_pre_neurons, neurons_per_core):
- pre_mac_slice = Slice(
- lo, min(lo + neurons_per_core - 1, n_pre_neurons - 1))
- pre_mac_vertex = pre_app_vertex.create_machine_vertex(
- pre_mac_slice, None)
- mac_graph.add_vertex(pre_mac_vertex)
-
- # Add delays if needed
- if delayed_indices_connected:
- pre_app_delay_vertex = DelayExtensionVertex(
- n_pre_neurons, 16.0, max_delay, pre_app_vertex)
- app_graph.add_vertex(pre_app_delay_vertex)
-
- for lo in range(0, n_pre_neurons, neurons_per_core):
- pre_mac_slice = Slice(
- lo, min(lo + neurons_per_core - 1, n_pre_neurons - 1))
- pre_mac_vertex = DelayExtensionMachineVertex(
- None, "", [], pre_app_delay_vertex, pre_mac_slice)
- mac_graph.add_vertex(pre_mac_vertex)
-
- # Make the routing info line up to force an app key in the pop table if
- # the constraints match up
- routing_info = RoutingInfo()
- n_key_bits = int(math.ceil(math.log(100, 2)))
- n_keys = 2**n_key_bits
- mask = 0xFFFFFFFF - (n_keys - 1)
-
- # Build a from list connector that is really an all-to-all connector,
- # but with delays that depend on what types of connection we want
- delays = []
- if undelayed_indices_connected:
- delays.append(10)
- if delayed_indices_connected:
- delays.append(20)
- connections = [(i, j, 0, delays[i % len(delays)])
- for i in range(n_pre_neurons) for j in range(100)]
- connector = FromListConnector(connections)
- synapse_dynamics = SynapseDynamicsStatic()
- synapse_info = SynapseInformation(
- connector, pre_app_vertex, post_app_vertex, False, False, None,
- synapse_dynamics, 0, True)
-
- # Create the application edge
- app_edge = ProjectionApplicationEdge(
- pre_app_vertex, post_app_vertex, synapse_info)
- app_graph.add_edge(app_edge, "Test")
-
- # Create the machine edges
- for pre_mac_vertex in pre_app_vertex.machine_vertices:
- i = pre_mac_vertex.index
- mac_edge = MachineEdge(
- pre_mac_vertex, post_mac_vertex, app_edge=app_edge)
- if undelayed_indices_connected and i in undelayed_indices_connected:
- mac_graph.add_edge(mac_edge, "Test")
- partition = mac_graph.get_outgoing_partition_for_edge(mac_edge)
- partition_info = PartitionRoutingInfo(
- [BaseKeyAndMask(i * n_keys, mask)], partition)
- routing_info.add_partition_info(partition_info)
-
- # Create the delay application edge and delay machine edges
- if delayed_indices_connected:
- delay_app_edge = DelayedApplicationEdge(
- pre_app_delay_vertex, post_app_vertex, synapse_info, app_edge)
- app_edge.delay_edge = delay_app_edge
- app_graph.add_edge(delay_app_edge, "Test")
-
- base_d_key = 16 * n_keys
- for pre_mac_vertex in pre_app_delay_vertex.machine_vertices:
- i = pre_mac_vertex.index
- mac_edge = MachineEdge(
- pre_mac_vertex, post_mac_vertex, app_edge=delay_app_edge)
- if i in delayed_indices_connected:
- mac_graph.add_edge(mac_edge, "Test")
- partition = mac_graph.get_outgoing_partition_for_edge(mac_edge)
- partition_info = PartitionRoutingInfo(
- [BaseKeyAndMask(base_d_key + (i * n_keys), mask)],
- partition)
- routing_info.add_partition_info(partition_info)
+ p.setup(1.0)
+ post_pop = p.Population(
+ 100, p.IF_curr_exp(), label="Post",
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
+ p.IF_curr_exp.set_model_max_atoms_per_core(neurons_per_core)
+ pre_pop = p.Population(
+ n_pre_neurons, p.IF_curr_exp(), label="Pre",
+ additional_parameters={
+ "splitter": SplitterAbstractPopulationVertexSlice()})
+ p.Projection(
+ pre_pop, post_pop, p.FromListConnector(connections), p.StaticSynapse())
+
+ app_graph = globals_variables.get_simulator().original_application_graph
+ context = {
+ "MemoryApplicationGraph": app_graph
+ }
+ with (injection_context(context)):
+ delay_adder = DelaySupportAdder()
+ delay_adder.__call__(app_graph, 16.0)
+ partitioner = SpynnakerSplitterPartitioner()
+ machine_graph, _ = partitioner.__call__(app_graph, machine, 100)
+ allocator = ZonedRoutingInfoAllocator()
+ n_keys_mapper = EdgeToNKeysMapper()
+ n_keys_map = n_keys_mapper.__call__(machine_graph)
+ routing_info = allocator.__call__(
+ machine_graph, n_keys_map, flexible=False)
+
+ post_mac_vertex = next(iter(post_pop._vertex.machine_vertices))
+ post_vertex_slice = post_mac_vertex.vertex_slice
# Generate the data
temp_spec = tempfile.mktemp()
spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)
- synaptic_manager = SynapticManager(
- n_synapse_types=2, ring_buffer_sigma=5.0,
- spikes_per_second=100.0, drop_late_spikes=True)
- synaptic_manager.write_data_spec(
- spec, post_app_vertex, post_vertex_slice, post_mac_vertex,
- mac_graph, app_graph, routing_info, 1.0)
- spec.end_specification()
+
+ synaptic_matrices = SynapticMatrices(
+ post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
+ synaptic_matrix_region=1, direct_matrix_region=2, poptable_region=3,
+ connection_builder_region=4)
+ synaptic_matrices.write_synaptic_data(
+ spec, post_pop._vertex.incoming_projections, all_syn_block_sz=1000000,
+ weight_scales=[32, 32], routing_info=routing_info)
+
with io.FileIO(temp_spec, "rb") as spec_reader:
executor = DataSpecificationExecutor(
spec_reader, SDRAM.max_sdram_found)
executor.execute()
# Read the population table and check entries
- region = executor.get_region(
- POPULATION_BASED_REGIONS.POPULATION_TABLE.value)
+ region = executor.get_region(3)
mpop_data = numpy.frombuffer(
region.region_data, dtype="uint8").view("uint32")
n_entries = mpop_data[0]
@@ -581,19 +530,30 @@ def test_pop_based_master_pop_table_standard(
expected_n_entries = 0
expected_n_addresses = 0
if expect_app_keys:
- n_app_entries = (int(bool(undelayed_indices_connected)) +
- int(bool(delayed_indices_connected)))
+ # Always one for undelayed, maybe one for delayed if present
+ n_app_entries = 1 + int(bool(delayed_indices_connected))
expected_n_entries += n_app_entries
- # 2 addresses for an app key because of the extra info
- expected_n_addresses += n_app_entries * 2
+ # 2 address list entries for each entry, as there is also extra_info
+ expected_n_addresses += 2 * n_app_entries
+
+ # If both delayed and undelayed, there is an entry for each incoming
+ # machine edge
+ elif delayed_indices_connected and undelayed_indices_connected:
+ all_connected = set(undelayed_indices_connected)
+ all_connected.update(delayed_indices_connected)
+ expected_n_entries += len(all_connected)
+ expected_n_addresses += len(all_connected)
+
+ # If there are only undelayed indices, there is an entry for each
+ elif undelayed_indices_connected:
+ expected_n_entries += len(undelayed_indices_connected)
+ expected_n_addresses += len(undelayed_indices_connected)
+
+ # If there are only delayed indices, there are two entries for each because
+ # the undelayed ones are still connected
else:
- # An entry and address for each incoming machine edge
- if undelayed_indices_connected:
- expected_n_entries += len(undelayed_indices_connected)
- expected_n_addresses += len(undelayed_indices_connected)
- if delayed_indices_connected:
- expected_n_entries += len(delayed_indices_connected)
- expected_n_addresses += len(delayed_indices_connected)
+ expected_n_entries += 2 * len(delayed_indices_connected)
+ expected_n_addresses += 2 * len(delayed_indices_connected)
assert(n_entries == expected_n_entries)
assert(n_addresses == expected_n_addresses)
diff --git a/unittests/test_using_virtual_board/test_constraint.py b/unittests/test_using_virtual_board/test_constraint.py
index 5183f4b780..2b6cadfde5 100644
--- a/unittests/test_using_virtual_board/test_constraint.py
+++ b/unittests/test_using_virtual_board/test_constraint.py
@@ -39,7 +39,7 @@ def test_placement_constraint(self):
sim.run(simtime)
placements = self.get_placements("pop_1")
sim.end()
- self.assertEqual(4, len(placements))
+ self.assertGreater(len(placements), 0)
for [x, y, _] in placements:
self.assertEqual("1", x)
self.assertEqual("1", y)
diff --git a/unittests/test_using_virtual_board/test_radial_placer/test_radial_constraint.py b/unittests/test_using_virtual_board/test_radial_placer/test_radial_constraint.py
index c2394ffc35..459ba4e15b 100644
--- a/unittests/test_using_virtual_board/test_radial_placer/test_radial_constraint.py
+++ b/unittests/test_using_virtual_board/test_radial_placer/test_radial_constraint.py
@@ -37,7 +37,7 @@ def test_radial_some(self):
sim.run(simtime)
placements = self.get_placements("pop_1")
sim.end()
- self.assertEqual(4, len(placements))
+ self.assertGreater(len(placements), 0)
for [x, y, _] in placements:
self.assertEqual("1", x)
self.assertEqual("1", y)
@@ -56,9 +56,9 @@ def test_radial_many(self):
sim.run(simtime)
placements = self.get_placements("pop_1")
sim.end()
- self.assertEqual(20, len(placements))
+ self.assertGreater(len(placements), 0)
count = 0
for [x, y, _] in placements:
if x == "1" and y == "1":
count += 1
- self.assertGreater(count, 10)
+ self.assertGreater(count, 0)