diff --git a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py index 362eaf8122..57fec58d94 100644 --- a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py @@ -68,12 +68,6 @@ class MachineMunichMotorDevice( #: The name of the provenance item saying that packets were lost. INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" - _INPUT_BUFFER_FULL_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") def __init__( self, speed, sample_time, update_time, delay_time, @@ -127,30 +121,20 @@ def _provenance_region_id(self): def _n_additional_data_items(self): return self._PROVENANCE_ELEMENTS - @overrides(ProvidesProvenanceDataFromMachineImpl. - get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # get prov data - provenance_data = self._read_provenance_data(transceiver, placement) - # get system level prov - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - # get left over prov - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - # stuff for making prov data items - label, x, y, p, names = self._get_placement_details(placement) - - # get the only app level prov item - n_buffer_overflows = provenance_data[0] - - # build it - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INPUT_BUFFER_FULL_NAME), - n_buffer_overflows, report=n_buffer_overflows > 0, - message=self._INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows))) - return provenance_items + @overrides( + ProvidesProvenanceDataFromMachineImpl.parse_extra_provenance_items) + def parse_extra_provenance_items( + self, label, names, provenance_data): + n_buffer_overflows, = provenance_data + + yield ProvenanceDataItem( + names + [self.INPUT_BUFFER_FULL_NAME], n_buffer_overflows, + (n_buffer_overflows > 0), + f"The input buffer for {label} lost packets on " + f"{n_buffer_overflows} occasions. " + "This is often a sign that the system is running too quickly for " + "the number of neurons per core. 
Please increase the timer_tic " "or time_scale_factor or decrease the number of neurons per core.") @inject_items({ "routing_info": "MemoryRoutingInfos", diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 62896d9b34..77fe198695 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -585,18 +585,19 @@ def get_provenance_data(self, synapse_info): """ :param SynapseInformation synapse_info: :rtype: - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ name = "connector_{}_{}_{}".format( synapse_info.pre_population.label, synapse_info.post_population.label, self.__class__.__name__) # Convert to native Python integer; provenance system assumption ncd = self.__n_clipped_delays.item() - return [ProvenanceDataItem( + yield ProvenanceDataItem( [name, "Times_synaptic_delays_got_clipped"], ncd, report=(ncd > 0), message=self._CLIPPED_MSG.format( self.__class__.__name__, synapse_info.pre_population.label, - synapse_info.post_population.label, self.__min_delay, ncd))] + synapse_info.post_population.label, self.__min_delay, + ncd)) @property def safe(self): diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index 0794d3ae40..a9d459e769 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -88,7 +88,7 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str pre_population_label: label of pre. :param str post_population_label: label of post. 
:rtype: - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index 1c38b5140e..59b6ef1885 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -18,10 +18,10 @@ BYTES_PER_WORD, MICRO_TO_MILLISECOND_CONVERSION) from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( get_exp_lut_array) -from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\ - import AbstractTimingDependence -from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\ - import SynapseStructureWeightOnly +from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import ( + AbstractTimingDependence) +from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( + SynapseStructureWeightOnly) from spinn_front_end_common.utilities.globals_variables import get_simulator @@ -44,6 +44,7 @@ class TimingDependencePfisterSpikeTriplet(AbstractTimingDependence): "__tau_y_data", "__a_plus", "__a_minus"] + __PARAM_NAMES = ('tau_plus', 'tau_minus', 'tau_x', 'tau_y') # noinspection PyPep8Naming def __init__(self, tau_plus, tau_minus, tau_x, tau_y, A_plus, A_minus): @@ -189,4 +190,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus', 'tau_x', 'tau_y'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index fd59b644ab..e0e7d99890 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -37,6 +37,9 @@ class TimingDependenceRecurrent(AbstractTimingDependence): "__synapse_structure", "__a_plus", "__a_minus"] + __PARAM_NAMES = ( + 'accumulator_depression', 'accumulator_potentiation', + 'mean_pre_window', 'mean_post_window', 'dual_fsm') default_parameters = { 'accumulator_depression': -6, 'accumulator_potentiation': 6, @@ -187,5 +190,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['accumulator_depression', 'accumulator_potentiation', - 'mean_pre_window', 'mean_post_window', 'dual_fsm'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index 7f5a6cf7b8..3fe0a8638e 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -35,7 +35,7 @@ class TimingDependenceSpikeNearestPair(AbstractTimingDependence): "__tau_plus_data", "__a_plus", "__a_minus"] - + 
__PARAM_NAMES = ('tau_plus', 'tau_minus') default_parameters = {'tau_plus': 20.0, 'tau_minus': 20.0} def __init__(self, tau_plus=default_parameters['tau_plus'], @@ -154,4 +154,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 7b86eae0fc..0fdd326b67 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -35,6 +35,7 @@ class TimingDependenceSpikePair(AbstractTimingDependence): "__tau_plus_data", "__a_plus", "__a_minus"] + __PARAM_NAMES = ('tau_plus', 'tau_minus') def __init__( self, tau_plus=20.0, tau_minus=20.0, A_plus=0.01, A_minus=0.01): @@ -152,4 +153,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index 0118cf7e86..a947ec0457 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -36,7 +36,7 @@ class TimingDependenceVogels2011(AbstractTimingDependence): "__tau_data", "__a_plus", "__a_minus"] - + __PARAM_NAMES = ('alpha', 'tau') default_parameters = {'tau': 20.0} def __init__(self, alpha, tau=default_parameters['tau'], @@ -156,4 +156,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['alpha', 'tau'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index fd244d5f02..4415b19955 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -27,10 +27,10 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str post_population_label: label of post. 
:return: the provenance data of the weight dependency :rtype: - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] @abstractmethod def get_parameter_names(self): diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index 7bd0b6e6c9..3adc242d07 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -30,6 +30,7 @@ class WeightDependenceAdditive( __slots__ = [ "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A_plus', 'A_minus') # noinspection PyPep8Naming def __init__(self, w_min=0.0, w_max=1.0): @@ -116,4 +117,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A_plus', 'A_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index 0f86e33816..cf83f72919 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -31,6 +31,7 @@ class WeightDependenceAdditiveTriplet( "__a3_plus", "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A3_plus', 'A3_minus') default_parameters = {'w_min': 0.0, 'w_max': 1.0, 'A3_plus': 0.01, 'A3_minus': 0.01} @@ -153,4 +154,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A3_plus', 'A3_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index a64d41f730..cfd1e54447 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -29,6 +29,7 @@ class WeightDependenceMultiplicative( __slots__ = [ "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A_plus', 'A_minus') def __init__(self, w_min=0.0, w_max=1.0): """ @@ -113,4 +114,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A_plus', 'A_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 5aed35ef24..fd1fe48d49 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -12,11 +12,10 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-from enum import Enum -from pacman.executor.injection_decorator import inject_items -from spinn_front_end_common.interface.simulation import simulation_utilities +from enum import IntEnum from spinn_utilities.overrides import overrides +from pacman.executor.injection_decorator import inject_items from pacman.model.graphs.machine import MachineVertex from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem from spinn_front_end_common.interface.provenance import ( @@ -34,14 +33,15 @@ from spinn_front_end_common.interface.profiling.profile_utils import ( get_profiling_data) from spinn_front_end_common.utilities.utility_objs import ExecutableType +from spinn_front_end_common.utilities import ( + constants as common_constants, helpful_functions) +from spinn_front_end_common.interface.simulation import simulation_utilities from spynnaker.pyNN.models.neuron.synapse_dynamics import ( AbstractSynapseDynamicsStructural) from spynnaker.pyNN.utilities import constants, bit_field_utilities from spynnaker.pyNN.models.abstract_models import ( AbstractSynapseExpandable, AbstractReadParametersBeforeSet) from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS -from spinn_front_end_common.utilities import ( - constants as common_constants, helpful_functions) class PopulationMachineVertex( @@ -61,7 +61,7 @@ class PopulationMachineVertex( "__drop_late_spikes", "__change_requires_neuron_parameters_reload"] - class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): + class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum): """ Entries for the provenance data generated by standard neuron \ models. """ @@ -83,62 +83,18 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): INVALID_MASTER_POP_HITS = 9 BIT_FIELD_FILTERED_COUNT = 10 N_REWIRES = 11 - # the number of packets that were dropped as they arrived too late - # to be processed + #: The number of packets that were dropped as they arrived too late + #: to be processed N_LATE_SPIKES = 12 - # the max filled size of the input buffer + #: The max filled size of the input buffer INPUT_BUFFER_FILLED_SIZE = 13 - # the number of tdma misses + #: The number of TDMA misses TDMA_MISSES = 14 # the maxmimum number of background tasks queued MAX_BACKGROUND_QUEUED = 15 # the number of times the background queue overloaded N_BACKGROUND_OVERLOADS = 16 - SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated" - _SATURATION_COUNT_MESSAGE = ( - "The weights from the synapses for {} on {}, {}, {} saturated " - "{} times. If this causes issues you can increase the " - "spikes_per_second and / or ring_buffer_sigma " - "values located within the .spynnaker.cfg file.") - - INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" - _INPUT_BUFFER_FULL_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") - - TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events" - LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to" - N_RE_WIRES_NAME = "Number_of_rewires" - - SATURATED_PLASTIC_WEIGHTS_NAME = ( - "Times_plastic_synaptic_weights_have_saturated") - _SATURATED_PLASTIC_WEIGHTS_MESSAGE = ( - "The weights from the plastic synapses for {} on {}, {}, {} " - "saturated {} times. 
If this causes issue increase the " - "spikes_per_second and / or ring_buffer_sigma values located " - "within the .spynnaker.cfg file.") - - _N_LATE_SPIKES_NAME = "Number_of_late_spikes" - _N_LATE_SPIKES_MESSAGE_DROP = ( - "{} packets from {} on {}, {}, {} were dropped from the input buffer, " - "because they arrived too late to be processed in a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - _N_LATE_SPIKES_MESSAGE_NO_DROP = ( - "{} packets from {} on {}, {}, {} arrived too late to be processed in" - " a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - - _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer" - - _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" - _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" - _PROFILE_TAG_LABELS = { 0: "TIMER", 1: "DMA_READ", @@ -150,8 +106,19 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): _WORDS_TO_COVER_256_ATOMS = 8 # provenance data items - BIT_FIELD_FILTERED_PACKETS = \ - "How many packets were filtered by the bitfield filterer." + SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated" + INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" + TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events" + LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to" + N_RE_WIRES_NAME = "Number_of_rewires" + SATURATED_PLASTIC_WEIGHTS_NAME = ( + "Times_plastic_synaptic_weights_have_saturated") + _N_LATE_SPIKES_NAME = "Number_of_late_spikes" + _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer" + _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" + _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" + BIT_FIELD_FILTERED_PACKETS = ( + "How many packets were filtered by the bitfield filterer.") INVALID_MASTER_POP_HITS = "Invalid Master Pop hits" SPIKES_PROCESSED = "how many spikes were processed" DMA_COMPLETE = "DMA's that were completed" @@ -234,165 +201,148 @@ def _provenance_region_id(self): @property @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) def _n_additional_data_items(self): - return len(PopulationMachineVertex.EXTRA_PROVENANCE_DATA_ENTRIES) + return len(self.EXTRA_PROVENANCE_DATA_ENTRIES) @overrides(ProvidesProvenanceDataFromMachineImpl. 
get_provenance_data_from_machine) def get_provenance_data_from_machine(self, transceiver, placement): provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) + label, names = self._get_provenance_placement_description(placement) - times_timer_tic_overran = 0 - for item in provenance_items: + # This is why we have to override the superclass public method + tic_overruns = 0 + for item in self.parse_system_provenance_items( + label, names, provenance_data): + yield item if item.names[-1] == self._TIMER_TICK_OVERRUN: - times_timer_tic_overran = item.value - - n_saturations = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.SATURATION_COUNT.value] - n_buffer_overflows = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.BUFFER_OVERFLOW_COUNT.value] - n_pre_synaptic_events = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.PRE_SYNAPTIC_EVENT_COUNT.value] - last_timer_tick = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.CURRENT_TIMER_TIC.value] - n_plastic_saturations = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES. - PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value] - n_ghost_searches = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.GHOST_POP_TABLE_SEARCHES.value] - failed_to_read_bit_fields = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.FAILED_TO_READ_BIT_FIELDS.value] - dma_completes = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.DMA_COMPLETES.value] - spike_processing_count = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.SPIKE_PROGRESSING_COUNT.value] - invalid_master_pop_hits = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.INVALID_MASTER_POP_HITS.value] - n_packets_filtered_by_bit_field_filter = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.BIT_FIELD_FILTERED_COUNT.value] - n_rewires = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_REWIRES.value] - n_late_packets = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_LATE_SPIKES.value] - input_buffer_max_filled_size = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.INPUT_BUFFER_FILLED_SIZE.value] - tdma_misses = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSES.value] - max_background_queued = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.MAX_BACKGROUND_QUEUED.value] - n_background_overloads = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_BACKGROUND_OVERLOADS.value] - - label, x, y, p, names = self._get_placement_details(placement) + # GOTCHA! 
+ tic_overruns = item.value # translate into provenance data items - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.SATURATION_COUNT_NAME), - n_saturations, report=n_saturations > 0, - message=self._SATURATION_COUNT_MESSAGE.format( - label, x, y, p, n_saturations))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INPUT_BUFFER_FULL_NAME), - n_buffer_overflows, report=n_buffer_overflows > 0, - message=self._INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.TOTAL_PRE_SYNAPTIC_EVENT_NAME), - n_pre_synaptic_events)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.LAST_TIMER_TICK_NAME), - last_timer_tick)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.SATURATED_PLASTIC_WEIGHTS_NAME), - n_plastic_saturations, report=n_plastic_saturations > 0, - message=self._SATURATED_PLASTIC_WEIGHTS_MESSAGE.format( - label, x, y, p, n_plastic_saturations))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_RE_WIRES_NAME), n_rewires)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.GHOST_SEARCHES), n_ghost_searches, - report=n_ghost_searches > 0, - message=( - "The number of failed population table searches for {} on {}," - " {}, {} was {}. If this number is large relative to the " - "predicted incoming spike rate, try increasing source and " - "target neurons per core".format( - label, x, y, p, n_ghost_searches)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.BIT_FIELDS_NOT_READ), - failed_to_read_bit_fields, report=False, - message=( - "The filter for stopping redundant DMA's couldn't be fully " - "filled in, it failed to read {} entries, which means it " - "required a max of {} extra bytes of DTCM (assuming cores " - "have at max 255 neurons. Try reducing neurons per core, or " - "size of buffers, or neuron params per neuron etc.".format( - failed_to_read_bit_fields, - failed_to_read_bit_fields * - self._WORDS_TO_COVER_256_ATOMS)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.DMA_COMPLETE), dma_completes)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.SPIKES_PROCESSED), - spike_processing_count)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INVALID_MASTER_POP_HITS), - invalid_master_pop_hits, report=invalid_master_pop_hits > 0, - message=( - "There were {} keys which were received by core {}:{}:{} which" - " had no master pop entry for it. This is a error, which most " - "likely strives from bad routing.".format( - invalid_master_pop_hits, x, y, p)))) - provenance_items.append((ProvenanceDataItem( - self._add_name(names, self.BIT_FIELD_FILTERED_PACKETS), - n_packets_filtered_by_bit_field_filter, - report=( - n_packets_filtered_by_bit_field_filter > 0 and ( - n_buffer_overflows > 0 or times_timer_tic_overran > 0)), - message=( - "There were {} packets received by {}:{}:{} that were " - "filtered by the Bitfield filterer on the core. These packets " - "were having to be stored and processed on core, which means " - "the core may not be running as efficiently as it could. 
" - "Please adjust the network or the mapping so that these " - "packets are filtered in the router to improve " - "performance.".format( - n_packets_filtered_by_bit_field_filter, x, y, p))))) + yield from self.__parse_prov_items( + label, names, self._get_extra_provenance_words(provenance_data), + tic_overruns) + + def __parse_prov_items(self, label, names, provenance_data, tic_overruns): + # Would be parse_extra_provenance_items except for extra argument + """ + :param str label: + :param list(str) names: + :param list(int) provenance_data: + :param int tic_overruns: + :rtype: iterable(ProvenanceDataItem) + """ + (n_pre_synaptic_events, n_saturations, n_buffer_overflows, + last_timer_tick, n_plastic_saturations, n_ghost_searches, + n_bitfield_fails, dma_completes, spike_processing_count, + invalid_master_pop_hits, n_packets_filtered, n_rewires, + n_late_packets, input_buffer_max, tdma_misses, max_bg_queued, + n_bg_overloads) = provenance_data + + # translate into provenance data items + yield ProvenanceDataItem( + names + [self.SATURATION_COUNT_NAME], + n_saturations, (n_saturations > 0), + f"The weights from the synapses for {label} saturated " + f"{n_saturations} times. If this causes issues you can increase " + "the spikes_per_second and / or ring_buffer_sigma values located " + "within the .spynnaker.cfg file.") + yield ProvenanceDataItem( + names + [self.INPUT_BUFFER_FULL_NAME], + n_buffer_overflows, (n_buffer_overflows > 0), + f"The input buffer for {label} lost packets on " + f"{n_buffer_overflows} occasions. This is often a sign that the " + "system is running too quickly for the number of neurons per " + "core. Please increase the timer_tic or time_scale_factor or " + "decrease the number of neurons per core.") + yield ProvenanceDataItem( + names + [self.TOTAL_PRE_SYNAPTIC_EVENT_NAME], + n_pre_synaptic_events) + yield ProvenanceDataItem( + names + [self.LAST_TIMER_TICK_NAME], last_timer_tick) + yield ProvenanceDataItem( + names + [self.SATURATED_PLASTIC_WEIGHTS_NAME], + n_plastic_saturations, (n_plastic_saturations > 0), + f"The weights from the plastic synapses for {label} saturated " + f"{n_plastic_saturations} times. If this causes issue increase " + "the spikes_per_second and / or ring_buffer_sigma values located " + "within the .spynnaker.cfg file.") + yield ProvenanceDataItem( + names + [self.N_RE_WIRES_NAME], n_rewires) + yield ProvenanceDataItem( + names + [self.GHOST_SEARCHES], n_ghost_searches, + (n_ghost_searches > 0), + f"The number of failed population table searches for {label} was " + f"{n_ghost_searches}. If this number is large relative to the " + "predicted incoming spike rate, try increasing source and target " + "neurons per core") + yield ProvenanceDataItem( + names + [self.BIT_FIELDS_NOT_READ], + n_bitfield_fails, False, + f"On {label}, the filter for stopping redundant DMAs couldn't be " + f"fully filled in; it failed to read {n_bitfield_fails} entries, " + "which means it required a max of " + f"{n_bitfield_fails * self._WORDS_TO_COVER_256_ATOMS} " + "extra bytes of DTCM (assuming cores have at most 255 neurons). 
" + "Try reducing neurons per core, or size of buffers, or neuron " + "params per neuron, etc.") + yield ProvenanceDataItem( + names + [self.DMA_COMPLETE], dma_completes) + yield ProvenanceDataItem( + names + [self.SPIKES_PROCESSED], + spike_processing_count) + yield ProvenanceDataItem( + names + [self.INVALID_MASTER_POP_HITS], + invalid_master_pop_hits, (invalid_master_pop_hits > 0), + f"On {label}, there were {invalid_master_pop_hits} keys received " + "that had no master pop entry for them. This is an error, which " + "most likely stems from bad routing.") + yield ProvenanceDataItem( + names + [self.BIT_FIELD_FILTERED_PACKETS], + n_packets_filtered, (n_packets_filtered > 0 and ( + n_buffer_overflows > 0 or tic_overruns > 0)), + f"On {label}, there were {n_packets_filtered} packets received " + "that were filtered by the bit-field filterer on the core. These " + "packets were having to be stored and processed on core, which " + "means the core may not be running as efficiently as it should. " + "Please adjust the network or the mapping so that these packets " + "are filtered in the router to improve performance.") + late_message = ( - self._N_LATE_SPIKES_MESSAGE_DROP if self.__drop_late_spikes - else self._N_LATE_SPIKES_MESSAGE_NO_DROP) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self._N_LATE_SPIKES_NAME), - n_late_packets, report=n_late_packets > 0, - message=late_message.format(n_late_packets, label, x, y, p))) - - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME), - input_buffer_max_filled_size, report=False)) - - provenance_items.append(self._app_vertex.get_tdma_provenance_item( - names, x, y, p, tdma_misses)) - - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self._BACKGROUND_MAX_QUEUED_NAME), - max_background_queued, report=max_background_queued > 1, - message=( - "A maximum of {} background tasks were queued on {} on" - " {}, {}, {}. Try increasing the time_scale_factor located" - " within the .spynnaker.cfg file or in the pynn.setup()" - " method.".format(max_background_queued, label, x, y, p)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self._BACKGROUND_OVERLOADS_NAME), - n_background_overloads, report=n_background_overloads > 0, - message=( - "On {} on {}, {}, {}, the background queue overloaded {}" - " times. Try increasing the time_scale_factor located within" - " the .spynnaker.cfg file or in the pynn.setup() method." - .format(label, x, y, p, n_background_overloads)))) - return provenance_items + f"On {label}, {n_late_packets} packets were dropped from the " + "input buffer, because they arrived too late to be processed in " + "a given time step. Try increasing the time_scale_factor located " + "within the .spynnaker.cfg file or in the pynn.setup() method." + if self.__drop_late_spikes else + f"On {label}, {n_late_packets} packets arrived too late to be " + "processed in a given time step. 
Try increasing the " "time_scale_factor located within the .spynnaker.cfg file or in " "the pynn.setup() method.") + yield ProvenanceDataItem( + names + [self._N_LATE_SPIKES_NAME], + n_late_packets, (n_late_packets > 0), late_message) + + yield ProvenanceDataItem( + names + [self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME], + input_buffer_max, report=False) + + yield self._app_vertex.get_tdma_provenance_item( + names, label, tdma_misses) + + yield ProvenanceDataItem( + names + [self._BACKGROUND_MAX_QUEUED_NAME], + max_bg_queued, (max_bg_queued > 1), + f"On {label}, a maximum of {max_bg_queued} background tasks were " + "queued, which can indicate an overloaded core. Try increasing " + "the time_scale_factor located within the .spynnaker.cfg file or " + "in the pynn.setup() method.") + yield ProvenanceDataItem( + names + [self._BACKGROUND_OVERLOADS_NAME], + n_bg_overloads, (n_bg_overloads > 0), + f"On {label}, the background queue overloaded {n_bg_overloads} " + "times, which can indicate an overloaded core. Try increasing " + "the time_scale_factor located within the .spynnaker.cfg file or " + "in the pynn.setup() method.") @overrides(AbstractReceiveBuffersToHost.get_recorded_region_ids) def get_recorded_region_ids(self): diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 39f9c94704..c17d706179 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -128,9 +128,11 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str pre_population_label: :param str post_population_label: + :rtype: + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] def get_delay_maximum(self, connector, synapse_info): """ Get the maximum delay for the synapses diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 8095189917..f69a2f6f82 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -22,14 +22,15 @@ BYTES_PER_WORD, BYTES_PER_SHORT) from spinn_front_end_common.utilities.globals_variables import get_simulator from spynnaker.pyNN.models.abstract_models import AbstractSettable +from spynnaker.pyNN.exceptions import ( + InvalidParameterType, SynapticConfigurationException) +from spynnaker.pyNN.utilities.utility_calls import get_n_bits from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics +from .abstract_synapse_dynamics import AbstractSynapseDynamics from .abstract_synapse_dynamics_structural import ( AbstractSynapseDynamicsStructural) from .abstract_generate_on_machine import ( AbstractGenerateOnMachine, MatrixGeneratorID) -from spynnaker.pyNN.exceptions import ( - InvalidParameterType, SynapticConfigurationException) -from spynnaker.pyNN.utilities.utility_calls import get_n_bits # How large are the time-stamps stored with each event TIME_STAMP_BYTES = BYTES_PER_WORD @@ -476,29 +477,18 @@ def get_weight_maximum(self, connector, synapse_info): # the weight dependence return max(w_max, self.__weight_dependence.weight_maximum) + @overrides(AbstractSynapseDynamics.get_provenance_data) def get_provenance_data(self, 
pre_population_label, post_population_label): - """ - :param str pre_population_label: - :param str post_population_label: - :rtype: - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) - """ - prov_data = list() - if self.__timing_dependence is not None: - prov_data.extend(self.__timing_dependence.get_provenance_data( - pre_population_label, post_population_label)) - if self.__weight_dependence is not None: - prov_data.extend(self.__weight_dependence.get_provenance_data( - pre_population_label, post_population_label)) - return prov_data + yield from self.__timing_dependence.get_provenance_data( + pre_population_label, post_population_label) + yield from self.__weight_dependence.get_provenance_data( + pre_population_label, post_population_label) @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) def get_parameter_names(self): names = ['weight', 'delay'] - if self.__timing_dependence is not None: - names.extend(self.__timing_dependence.get_parameter_names()) - if self.__weight_dependence is not None: - names.extend(self.__weight_dependence.get_parameter_names()) + names.extend(self.__timing_dependence.get_parameter_names()) + names.extend(self.__weight_dependence.get_parameter_names()) return names @overrides(AbstractPlasticSynapseDynamics.get_max_synapses) diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py index b5cdb1c626..89f195348c 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py @@ -54,8 +54,7 @@ def _flatten(alist): for item in alist: if hasattr(item, "__iter__"): - for subitem in _flatten(item): - yield subitem + yield from _flatten(item) else: yield item @@ -210,24 +209,13 @@ def get_profile_data(self, transceiver, placement): self.PROFILE_TAG_LABELS, transceiver, placement) @overrides(ProvidesProvenanceDataFromMachineImpl. 
- get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # pylint: disable=too-many-locals - provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - + parse_extra_provenance_items) + def parse_extra_provenance_items(self, label, names, provenance_data): n_times_tdma_fell_behind = provenance_data[ self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSED_SLOTS.value] - _, x, y, p, names = self._get_placement_details(placement) - - provenance_items.append( - self._app_vertex.get_tdma_provenance_item( - names, x, y, p, n_times_tdma_fell_behind)) - return provenance_items + yield self._app_vertex.get_tdma_provenance_item( + names, label, n_times_tdma_fell_behind) @overrides(AbstractHasAssociatedBinary.get_binary_file_name) def get_binary_file_name(self): diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py index af92f77fa9..c1a4d9c299 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py @@ -71,67 +71,16 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): N_EXTRA_PROVENANCE_DATA_ENTRIES = len(EXTRA_PROVENANCE_DATA_ENTRIES) - COUNT_SATURATION_ERROR_MESSAGE = ( - "The delay extension {} has dropped {} packets because during " - "certain time steps a neuron was asked to spike more than 256 times. " - "This causes a saturation on the count tracker which is a uint8. " - "Reduce the packet rates, or modify the delay extension to have " - "larger counters.") - COUNT_SATURATION_NAME = "saturation_count" - - INVALID_NEURON_IDS_ERROR_MESSAGE = ( - "The delay extension {} has dropped {} packets because their " - "neuron id was not valid. This is likely a routing issue. " - "Please fix and try again") - INVALID_NEURON_ID_COUNT_NAME = "invalid_neuron_count" - - PACKETS_DROPPED_FROM_INVALID_KEY_ERROR_MESSAGE = ( - "The delay extension {} has dropped {} packets due to the packet " - "key being invalid. This is likely a routing issue. " - "Please fix and try again") - INVALID_KEY_COUNT_NAME = "invalid_key_count" - N_PACKETS_RECEIVED_NAME = "Number_of_packets_received" - N_PACKETS_PROCESSED_NAME = "Number_of_packets_processed" - - MISMATCH_PROCESSED_FROM_RECEIVED_ERROR_MESSAGE = ( - "The delay extension {} on {}, {}, {} only processed {} of {}" - " received packets. This could indicate a fault.") - MISMATCH_ADDED_FROM_PROCESSED_NAME = ( "Number_of_packets_added_to_delay_slot") - - MISMATCH_ADDED_FROM_PROCESSED_ERROR_MESSAGE = ( - "The delay extension {} on {}, {}, {} only added {} of {} processed " - "packets. This could indicate a routing or filtering fault") - N_PACKETS_SENT_NAME = "Number_of_packets_sent" - INPUT_BUFFER_LOST_NAME = "Times_the_input_buffer_lost_packets" - - INPUT_BUFFER_LOST_ERROR_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. 
Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") - N_LATE_SPIKES_NAME = "Number_of_late_spikes" - N_LATE_SPIKES_MESSAGE_DROP = ( - "{} packets from {} on {}, {}, {} were dropped from the input buffer, " - "because they arrived too late to be processed in a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - N_LATE_SPIKES_MESSAGE_NO_DROP = ( - "{} packets from {} on {}, {}, {} arrived too late to be processed in" - " a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - DELAYED_FOR_TRAFFIC_NAME = "Number_of_times_delayed_to_spread_traffic" BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" @@ -174,114 +123,88 @@ def resources_required(self): return self.__resources @overrides(ProvidesProvenanceDataFromMachineImpl. - get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # pylint: disable=too-many-locals - provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - - n_packets_received = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_RECEIVED.value] - n_packets_processed = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_PROCESSED.value] - n_packets_added = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_ADDED.value] - n_packets_sent = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_SENT.value] - n_buffer_overflows = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_BUFFER_OVERFLOWS.value] - n_delays = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_DELAYS.value] - n_times_tdma_fell_behind = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_TIMES_TDMA_FELL_BEHIND.value] - n_saturation = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES. - N_PACKETS_LOST_DUE_TO_COUNT_SATURATION.value] - n_packets_invalid_neuron = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES. - N_PACKETS_WITH_INVALID_NEURON_IDS.value] - n_packets_invalid_keys = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES. 
- N_PACKETS_DROPPED_DUE_TO_INVALID_KEY.value] - n_late_packets = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_LATE_SPIKES.value] - max_background_queued = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.MAX_BACKGROUND_QUEUED.value] - n_background_overloads = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_BACKGROUND_OVERLOADS.value] - - label, x, y, p, names = self._get_placement_details(placement) + parse_extra_provenance_items) + def parse_extra_provenance_items(self, label, names, provenance_data): + (n_received, n_processed, n_added, n_sent, n_overflows, n_delays, + n_tdma_behind, n_sat, n_bad_neuron, n_bad_keys, n_late_spikes, + max_bg, n_bg_overloads) = provenance_data # translate into provenance data items - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.COUNT_SATURATION_NAME), - n_saturation, report=n_saturation != 0, - message=self.COUNT_SATURATION_ERROR_MESSAGE.format( - label, n_saturation))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INVALID_NEURON_ID_COUNT_NAME), - n_packets_invalid_neuron, report=n_packets_invalid_neuron != 0, - message=self.INVALID_NEURON_IDS_ERROR_MESSAGE.format( - label, n_packets_invalid_neuron))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INVALID_NEURON_ID_COUNT_NAME), - n_packets_invalid_keys, n_packets_invalid_keys != 0, - self.PACKETS_DROPPED_FROM_INVALID_KEY_ERROR_MESSAGE.format( - label, n_packets_invalid_keys))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_PACKETS_RECEIVED_NAME), - n_packets_received)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_PACKETS_PROCESSED_NAME), - n_packets_processed, n_packets_received != n_packets_processed, - self.MISMATCH_PROCESSED_FROM_RECEIVED_ERROR_MESSAGE.format( - label, x, y, p, n_packets_processed, n_packets_received))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.MISMATCH_ADDED_FROM_PROCESSED_NAME), - n_packets_added, n_packets_added != n_packets_processed, - self.MISMATCH_ADDED_FROM_PROCESSED_ERROR_MESSAGE.format( - label, x, y, p, n_packets_added, n_packets_processed))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_PACKETS_SENT_NAME), n_packets_sent)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.INPUT_BUFFER_LOST_NAME), - n_buffer_overflows, - report=n_buffer_overflows > 0, - message=self.INPUT_BUFFER_LOST_ERROR_MESSAGE.format( - label, x, y, p, n_buffer_overflows))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.DELAYED_FOR_TRAFFIC_NAME), n_delays)) - provenance_items.append( - self._app_vertex.get_tdma_provenance_item( - names, x, y, p, n_times_tdma_fell_behind)) + yield ProvenanceDataItem( + names + [self.COUNT_SATURATION_NAME], + n_sat, (n_sat != 0), + f"The delay extension {label} has dropped {n_sat} packets because " + "during certain time steps a neuron was asked to spike more than " + "256 times. This causes a saturation on the count tracker which " + "is a uint8. Reduce the packet rates, or modify the delay " + "extension to have larger counters.") + yield ProvenanceDataItem( + names + [self.INVALID_NEURON_ID_COUNT_NAME], + n_bad_neuron, (n_bad_neuron != 0), + f"The delay extension {label} has dropped {n_bad_neuron} packets " + "because their neuron id was not valid. This is likely a routing " + "issue. 
Please fix and try again") + yield ProvenanceDataItem( + names + [self.INVALID_KEY_COUNT_NAME], + n_bad_keys, (n_bad_keys != 0), + f"The delay extension {label} has dropped {n_bad_keys} packets " + "due to the packet key being invalid. This is likely a routing " + "issue. Please fix and try again") + yield ProvenanceDataItem( + names + [self.N_PACKETS_RECEIVED_NAME], n_received) + yield ProvenanceDataItem( + names + [self.N_PACKETS_PROCESSED_NAME], + n_processed, (n_received != n_processed), + f"The delay extension {label} only processed {n_processed} of " + f"{n_received} received packets. This could indicate a fault.") + yield ProvenanceDataItem( + names + [self.MISMATCH_ADDED_FROM_PROCESSED_NAME], + n_added, (n_added != n_processed), + f"The delay extension {label} only added {n_added} of " + f"{n_processed} processed packets. This could indicate a " + "routing or filtering fault") + yield ProvenanceDataItem( + names + [self.N_PACKETS_SENT_NAME], n_sent) + yield ProvenanceDataItem( + names + [self.INPUT_BUFFER_LOST_NAME], + n_overflows, (n_overflows > 0), + f"The input buffer for {label} lost packets on {n_overflows} " + "occasions. This is often a sign that the system is running " + "too quickly for the number of neurons per core. Please " + "increase the timer_tic or time_scale_factor or decrease the " + "number of neurons per core.") + yield ProvenanceDataItem( + names + [self.DELAYED_FOR_TRAFFIC_NAME], n_delays) + yield self._app_vertex.get_tdma_provenance_item( + names, label, n_tdma_behind) + late_message = ( - self.N_LATE_SPIKES_MESSAGE_DROP - if self._app_vertex.drop_late_spikes - else self.N_LATE_SPIKES_MESSAGE_NO_DROP) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_LATE_SPIKES_NAME), - n_late_packets, report=n_late_packets > 0, - message=late_message.format(n_late_packets, label, x, y, p))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.BACKGROUND_MAX_QUEUED_NAME), - max_background_queued, report=max_background_queued > 1, - message=( - "A maximum of {} background tasks were queued on {} on" - " {}, {}, {}. Try increasing the time_scale_factor located" - " within the .spynnaker.cfg file or in the pynn.setup()" - " method.".format(max_background_queued, label, x, y, p)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.BACKGROUND_OVERLOADS_NAME), - n_background_overloads, report=n_background_overloads > 0, - message=( - "On {} on {}, {}, {}, the background queue overloaded {}" - " times. Try increasing the time_scale_factor located within" - " the .spynnaker.cfg file or in the pynn.setup() method." - .format(label, x, y, p, n_background_overloads)))) - return provenance_items + f"On {label}, {n_late_spikes} packets were dropped from the " + "input buffer, because they arrived too late to be processed in " + "a given time step. Try increasing the time_scale_factor located " + "within the .spynnaker.cfg file or in the pynn.setup() method." + if self._app_vertex.drop_late_spikes else + f"On {label}, {n_late_spikes} packets arrived too late to be " + "processed in a given time step. Try increasing the " + "time_scale_factor located within the .spynnaker.cfg file or in " + "the pynn.setup() method.") + yield ProvenanceDataItem( + names + [self.N_LATE_SPIKES_NAME], + n_late_spikes, report=(n_late_spikes > 0), + message=late_message) + + yield ProvenanceDataItem( + names + [self.BACKGROUND_MAX_QUEUED_NAME], + max_bg, (max_bg > 1), + f"On {label}, a maximum of {max_bg} background tasks were queued. 
" + "Try increasing the time_scale_factor located within the " + ".spynnaker.cfg file or in the pynn.setup() method.") + yield ProvenanceDataItem( + names + [self.BACKGROUND_OVERLOADS_NAME], + n_bg_overloads, (n_bg_overloads > 0), + f"On {label}, the background queue overloaded {n_bg_overloads} " + "times. Try increasing the time_scale_factor located within the " + ".spynnaker.cfg file or in the pynn.setup() method.") @overrides(MachineVertex.get_n_keys_for_partition) def get_n_keys_for_partition(self, _partition): diff --git a/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg b/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg index e7d0832df3..f592e58cb5 100644 --- a/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg +++ b/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg @@ -6,5 +6,4 @@ extract_iobuf_from_cores = ALL extract_iobuf_from_binary_types = None generate_bit_field_summary_report = False -read_router_compressor_with_bitfield_iobuf = False generate_router_compression_with_bitfield_report = False diff --git a/unittests/model_tests/neuron/test_neural_parameter.py b/unittests/model_tests/neuron/test_neural_parameter.py index 62c8e05ff3..d7398248c7 100644 --- a/unittests/model_tests/neuron/test_neural_parameter.py +++ b/unittests/model_tests/neuron/test_neural_parameter.py @@ -74,8 +74,7 @@ def test_range_list(): def _generator(size): - for i in range(size): - yield i + yield from range(size) def range_list_as_list(spec): diff --git a/unittests/test_sata_connectors/spynnaker.cfg b/unittests/test_sata_connectors/spynnaker.cfg index 656a5db749..d4d16052ba 100644 --- a/unittests/test_sata_connectors/spynnaker.cfg +++ b/unittests/test_sata_connectors/spynnaker.cfg @@ -11,11 +11,8 @@ spalloc_machine = None virtual_board = True -requires_wrap_arounds = None - down_cores = None down_chips = None down_links = 2,5,2:2,5,3:6,1,2:6,1,3:10,9,2:10,9,3 -core_limit = None width = 12 height = 12