Merge pull request #1458 from SpiNNakerManchester/manual_weight_scaling
Manual weight scaling
Christian-B authored Jul 18, 2024
2 parents 74c7cea + 6ff9a1e commit 6f7455b
Showing 7 changed files with 148 additions and 31 deletions.
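
The change adds a new max_expected_summed_weight population parameter that lets the user fix the ring-buffer weight scaling per synapse type, instead of having sPyNNaker estimate it from the incoming projections. A minimal usage sketch, adapted from the integration test added in this commit (the population sizes, weight and 0.5 connection probability are illustrative only):

import pyNN.spiNNaker as p

p.setup(timestep=1.0)
pop_input = p.Population(1000, p.IF_curr_exp())
# One value per synapse type; IF_curr_exp has two (excitatory, inhibitory).
# The ring-buffer scaling is derived from these values rather than being
# estimated from the incoming projections.
pop_fixed = p.Population(
    256, p.IF_curr_exp(), max_expected_summed_weight=[2.0, 0.0])
p.Projection(
    pop_input, pop_fixed, p.FixedProbabilityConnector(0.5),
    p.StaticSynapse(weight=0.5, delay=1.0))
p.run(0)
p.end()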
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional
+from typing import Optional, List
 from spinn_utilities.overrides import overrides
 from spinn_front_end_common.utilities.exceptions import ConfigurationException
 from spynnaker.pyNN.extra_algorithms.splitter_components import (
@@ -104,6 +104,7 @@ def create_vertex(
             self, n_neurons: int, label: str, *,
             spikes_per_second: Optional[float] = None,
             ring_buffer_sigma: Optional[float] = None,
+            max_expected_summed_weight: Optional[List[float]] = None,
             incoming_spike_buffer_size: Optional[int] = None,
             drop_late_spikes: Optional[bool] = None,
             splitter: Optional[SplitterAbstractPopulationVertex] = None,
@@ -118,7 +119,11 @@ def create_vertex(
         model.n_steps_per_timestep = n_steps_per_timestep
         max_atoms = self.get_model_max_atoms_per_dimension_per_core()
         return ExternalDeviceLifControlVertex(
-            self._devices, self._create_edges, max_atoms, model, self,
-            self._translator, spikes_per_second, label, ring_buffer_sigma,
-            incoming_spike_buffer_size, drop_late_spikes, splitter, seed,
-            n_colour_bits)
+            devices=self._devices, create_edges=self._create_edges,
+            max_atoms_per_core=max_atoms, neuron_impl=model, pynn_model=self,
+            translator=self._translator, spikes_per_second=spikes_per_second,
+            label=label, ring_buffer_sigma=ring_buffer_sigma,
+            max_expected_summed_weight=max_expected_summed_weight,
+            incoming_spike_buffer_size=incoming_spike_buffer_size,
+            drop_late_spikes=drop_late_spikes, splitter=splitter, seed=seed,
+            n_colour_bits=n_colour_bits)
@@ -54,14 +54,15 @@ class ExternalDeviceLifControlVertex(
     _DEFAULT_COMMAND_MASK = 0xFFFFFFFF
 
     def __init__(
-            self, devices: Sequence[AbstractMulticastControllableDevice],
+            self, *, devices: Sequence[AbstractMulticastControllableDevice],
             create_edges: bool, max_atoms_per_core: Tuple[int, ...],
             neuron_impl: AbstractNeuronImpl,
             pynn_model: AbstractPyNNNeuronModel,
             translator: Optional[AbstractEthernetTranslator] = None,
             spikes_per_second: Optional[float] = None,
             label: Optional[str] = None,
             ring_buffer_sigma: Optional[float] = None,
+            max_expected_summed_weight: Optional[List[float]] = None,
             incoming_spike_buffer_size: Optional[int] = None,
             drop_late_spikes: Optional[bool] = None,
             splitter: Optional[SplitterAbstractPopulationVertex] = None,
@@ -92,11 +93,16 @@ def __init__(
         if drop_late_spikes is None:
             drop_late_spikes = False
         super().__init__(
-            len(devices), f"ext_dev{devices}" if label is None else label,
-            max_atoms_per_core,
-            spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size,
-            neuron_impl, pynn_model, drop_late_spikes, splitter, seed,
-            n_colour_bits)
+            n_neurons=len(devices),
+            label=f"ext_dev{devices}" if label is None else label,
+            max_atoms_per_core=max_atoms_per_core,
+            spikes_per_second=spikes_per_second,
+            ring_buffer_sigma=ring_buffer_sigma,
+            max_expected_summed_weight=max_expected_summed_weight,
+            incoming_spike_buffer_size=incoming_spike_buffer_size,
+            neuron_impl=neuron_impl, pynn_model=pynn_model,
+            drop_late_spikes=drop_late_spikes, splitter=splitter, seed=seed,
+            n_colour_bits=n_colour_bits)
 
         if not devices:
             raise ConfigurationException("No devices specified")
44 changes: 30 additions & 14 deletions spynnaker/pyNN/models/neuron/abstract_population_vertex.py
@@ -245,10 +245,11 @@ class AbstractPopulationVertex(
     CORE_PARAMS_BASE_SIZE = 5 * BYTES_PER_WORD
 
     def __init__(
-            self, n_neurons: int, label: str,
+            self, *, n_neurons: int, label: str,
             max_atoms_per_core: Union[int, Tuple[int, ...]],
             spikes_per_second: Optional[float],
             ring_buffer_sigma: Optional[float],
+            max_expected_summed_weight: Optional[List[float]],
             incoming_spike_buffer_size: Optional[int],
             neuron_impl: AbstractNeuronImpl,
             pynn_model: AbstractPyNNNeuronModel, drop_late_spikes: bool,
@@ -266,6 +267,8 @@ def __init__(
             size; a good starting choice is 5.0. Given length of simulation
             we can set this for approximate number of saturation events.
         :type ring_buffer_sigma: float or None
+        :param max_expected_summed_weight:
+            The maximum expected summed weights for each synapse type.
         :param incoming_spike_buffer_size:
         :type incoming_spike_buffer_size: int or None
         :param bool drop_late_spikes: control flag for dropping late packets.
@@ -304,6 +307,15 @@ def __init__(
         else:
             self.__spikes_per_second = spikes_per_second
 
+        self.__max_expected_summed_weight = max_expected_summed_weight
+        if (max_expected_summed_weight is not None and
+                len(max_expected_summed_weight) !=
+                neuron_impl.get_n_synapse_types()):
+            raise ValueError(
+                "The number of expected summed weights does not match "
+                "the number of synapses in the neuron model "
+                f"({neuron_impl.get_n_synapse_types()})")
+
         self.__drop_late_spikes = drop_late_spikes
         if self.__drop_late_spikes is None:
             self.__drop_late_spikes = get_config_bool(
@@ -1156,21 +1168,25 @@ def get_ring_buffer_shifts(self) -> List[int]:
         :rtype: list(int)
         """
-        stats = _Stats(self.__neuron_impl, self.__spikes_per_second,
-                       self.__ring_buffer_sigma)
-
-        for proj in self.incoming_projections:
-            # pylint: disable=protected-access
-            synapse_info = proj._synapse_information
-            # Skip if this is a synapse dynamics synapse type
-            if synapse_info.synapse_type_from_dynamics:
-                continue
-            stats.add_projection(proj)
-
         n_synapse_types = self.__neuron_impl.get_n_synapse_types()
         max_weights = numpy.zeros(n_synapse_types)
-        for synapse_type in range(n_synapse_types):
-            max_weights[synapse_type] = stats.get_max_weight(synapse_type)
+        if self.__max_expected_summed_weight is not None:
+            max_weights[:] = self.__max_expected_summed_weight
+            max_weights *= self.__neuron_impl.get_global_weight_scale()
+        else:
+            stats = _Stats(self.__neuron_impl, self.__spikes_per_second,
+                           self.__ring_buffer_sigma)
+
+            for proj in self.incoming_projections:
+                # pylint: disable=protected-access
+                synapse_info = proj._synapse_information
+                # Skip if this is a synapse dynamics synapse type
+                if synapse_info.synapse_type_from_dynamics:
+                    continue
+                stats.add_projection(proj)
+
+            for synapse_type in range(n_synapse_types):
+                max_weights[synapse_type] = stats.get_max_weight(synapse_type)
 
         # Convert these to powers; we could use int.bit_length() for this if
         # they were integers, but they aren't...
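
For orientation, the maximum summed weights computed above (whether taken from max_expected_summed_weight or estimated via _Stats) are then turned into power-of-two ring-buffer shifts, as the trailing comment notes. A rough sketch of that conversion, under the assumption that the shift is simply the smallest power of two covering the maximum summed weight; the actual sPyNNaker helper handles further edge cases and is not part of this diff:

import math

def weight_to_shift(max_weight: float) -> int:
    # Smallest n >= 0 such that 2 ** n >= max_weight.
    # Illustrative only: the real conversion in the vertex also guards
    # against zero weights and clamps to the available fixed-point range.
    if max_weight <= 1.0:
        return 0
    return math.ceil(math.log2(max_weight))

# e.g. weight_to_shift(2.0) == 1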
15 changes: 11 additions & 4 deletions spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import annotations
-from typing import Any, Dict, Optional, TYPE_CHECKING
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
 from spinn_utilities.overrides import overrides
 from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
 from spynnaker.pyNN.models.abstract_pynn_model import AbstractPyNNModel
@@ -28,6 +28,7 @@
 
     _population_parameters: Dict[str, Any] = {
         "spikes_per_second": None, "ring_buffer_sigma": None,
+        "max_expected_summed_weight": None,
         "incoming_spike_buffer_size": None, "drop_late_spikes": None,
         "splitter": None, "seed": None, "n_colour_bits": None
     }
@@ -58,6 +59,7 @@ def create_vertex(
             self, n_neurons: int, label: str, *,
             spikes_per_second: Optional[float] = None,
             ring_buffer_sigma: Optional[float] = None,
+            max_expected_summed_weight: Optional[List[float]] = None,
             incoming_spike_buffer_size: Optional[int] = None,
             drop_late_spikes: Optional[bool] = None,
             splitter: Optional[SplitterAbstractPopulationVertex] = None,
@@ -76,9 +78,14 @@
         # pylint: disable=arguments-differ
         max_atoms = self.get_model_max_atoms_per_dimension_per_core()
         return AbstractPopulationVertex(
-            n_neurons, label, max_atoms, spikes_per_second, ring_buffer_sigma,
-            incoming_spike_buffer_size, self.__model, self,
-            drop_late_spikes or False, splitter, seed, n_colour_bits)
+            n_neurons=n_neurons, label=label, max_atoms_per_core=max_atoms,
+            spikes_per_second=spikes_per_second,
+            ring_buffer_sigma=ring_buffer_sigma,
+            max_expected_summed_weight=max_expected_summed_weight,
+            incoming_spike_buffer_size=incoming_spike_buffer_size,
+            neuron_impl=self.__model, pynn_model=self,
+            drop_late_spikes=drop_late_spikes or False,
+            splitter=splitter, seed=seed, n_colour_bits=n_colour_bits)
 
     @property
     @overrides(AbstractPyNNModel.name)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import annotations
-from typing import Any, Dict, Optional, cast, TYPE_CHECKING
+from typing import Any, Dict, List, Optional, cast, TYPE_CHECKING
 
 from spinn_utilities.overrides import overrides
 from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard
@@ -74,6 +74,7 @@ def create_vertex(
             self, n_neurons: int, label: str, *,
             spikes_per_second: Optional[float] = None,
             ring_buffer_sigma: Optional[float] = None,
+            max_expected_summed_weight: Optional[List[float]] = None,
             incoming_spike_buffer_size: Optional[int] = None,
             drop_late_spikes: Optional[bool] = None,
             splitter: Optional[SplitterAbstractPopulationVertex] = None,
@@ -86,8 +87,10 @@
         cast(NeuronImplStandard,
              self._model).n_steps_per_timestep = n_steps_per_timestep
         return super().create_vertex(
-            n_neurons, label, spikes_per_second=spikes_per_second,
+            n_neurons=n_neurons, label=label,
+            spikes_per_second=spikes_per_second,
             ring_buffer_sigma=ring_buffer_sigma,
+            max_expected_summed_weight=max_expected_summed_weight,
             incoming_spike_buffer_size=incoming_spike_buffer_size,
             drop_late_spikes=drop_late_spikes,
             splitter=splitter, seed=seed, n_colour_bits=n_colour_bits)
79 changes: 79 additions & 0 deletions spynnaker_integration_tests/test_various/test_max_weight.py
@@ -0,0 +1,79 @@
#!/usr/bin/python

# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Synfirechain-like example
"""
import math
import pyNN.spiNNaker as p
from spinnaker_testbase import BaseTestCase


def do_run():
    p.setup(timestep=1.0)
    weight = math.sqrt(2.0)
    random_delay = p.RandomDistribution("uniform", low=1, high=16)

    pop_input_1 = p.Population(10000, p.IF_curr_exp())
    pop_auto_1 = p.Population(256, p.IF_curr_exp())
    pop_fixed_1 = p.Population(
        256, p.IF_curr_exp(), max_expected_summed_weight=[2.0, 0.0])
    proj_auto_1 = p.Projection(
        pop_input_1, pop_auto_1, p.FixedProbabilityConnector(0.5),
        p.StaticSynapse(weight=weight, delay=random_delay))
    proj_fixed_1 = p.Projection(
        pop_input_1, pop_fixed_1, p.FixedProbabilityConnector(0.5),
        p.StaticSynapse(weight=weight, delay=random_delay))

    pop_input_2 = p.Population(1000, p.IF_curr_exp())
    pop_auto_2 = p.Population(256, p.IF_curr_exp())
    pop_fixed_2 = p.Population(
        256, p.IF_curr_exp(), max_expected_summed_weight=[2.0, 0.0])
    proj_auto_2 = p.Projection(
        pop_input_2, pop_auto_2, p.FixedProbabilityConnector(0.5),
        p.StaticSynapse(weight=weight, delay=random_delay))
    proj_fixed_2 = p.Projection(
        pop_input_2, pop_fixed_2, p.FixedProbabilityConnector(0.5),
        p.StaticSynapse(weight=weight, delay=random_delay))

    p.run(0)

    weights_auto_1 = proj_auto_1.get("weight", "list", with_address=False)
    weights_fixed_1 = proj_fixed_1.get("weight", "list", with_address=False)
    weights_auto_2 = proj_auto_2.get("weight", "list", with_address=False)
    weights_fixed_2 = proj_fixed_2.get("weight", "list", with_address=False)

    p.end()

    print("Auto 1 weights: {}".format(weights_auto_1[0]))
    print("Fixed 1 weights: {}".format(weights_fixed_1[0]))
    print("Auto 2 weights: {}".format(weights_auto_2[0]))
    print("Fixed 2 weights: {}".format(weights_fixed_2[0]))

    assert weights_auto_1[0] != weights_auto_2[0]
    assert weights_fixed_1[0] == weights_fixed_2[0]
    assert weights_auto_1[0] != weights_fixed_1[0]
    assert weights_auto_2[0] != weights_fixed_2[0]


class TestMaxWeight(BaseTestCase):

    def test_run(self):
        self.runsafe(do_run)


if __name__ == '__main__':
    do_run()
1 change: 1 addition & 0 deletions unittests/test_populations/test_vertex.py
@@ -104,6 +104,7 @@ def __init__(self):
             n_neurons=5, label="Mock",
             max_atoms_per_core=None, spikes_per_second=None,
             ring_buffer_sigma=None, incoming_spike_buffer_size=None,
+            max_expected_summed_weight=None,
             neuron_impl=foo_bar.model, pynn_model=foo_bar,
             drop_late_spikes=True, splitter=None, seed=None,
             n_colour_bits=None)