From a85b7b8a31336f4c5077c68c6da748631db9adcf Mon Sep 17 00:00:00 2001 From: Donal Fellows Date: Mon, 26 Oct 2020 16:15:03 +0000 Subject: [PATCH 1/8] Use the new provenance reporting API where possible Alas, PopulationMachineVertex can't do it; it wants to examine the standard provenance info as well as the PMV-specific fields. See SpiNNakerManchester/SpiNNFrontEndCommon#681 --- .../munich_spinnaker_link_motor_device.py | 23 ++---- .../neuron/population_machine_vertex.py | 1 + .../spike_source_poisson_machine_vertex.py | 21 ++--- .../delays/delay_extension_machine_vertex.py | 78 +++++++------------ 4 files changed, 40 insertions(+), 83 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py index fab6762090..8fc661f0dc 100644 --- a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py @@ -128,29 +128,18 @@ def _n_additional_data_items(self): return self.PROVENANCE_ELEMENTS @overrides(ProvidesProvenanceDataFromMachineImpl. - get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # get prov data - provenance_data = self._read_provenance_data(transceiver, placement) - # get system level prov - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - # get left over prov - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - # stuff for making prov data items - label, x, y, p, names = self._get_placement_details(placement) - - # get the only app level prov item + _get_extra_provenance_items) + def _get_extra_provenance_items( + self, label, location, names, provenance_data): n_buffer_overflows = provenance_data[0] + x, y, p = location # build it - provenance_items.append(ProvenanceDataItem( + yield ProvenanceDataItem( self._add_name(names, self.INPUT_BUFFER_FULL_NAME), n_buffer_overflows, report=n_buffer_overflows > 0, message=self.INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows))) - return provenance_items + label, x, y, p, n_buffer_overflows)) @property @overrides(ApplicationVertex.n_atoms) diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index c627cce558..4c3fce3eca 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -232,6 +232,7 @@ def get_provenance_data_from_machine(self, transceiver, placement): provenance_data = self._get_remaining_provenance_data_items( provenance_data) + # This is why we have to override the public method times_timer_tic_overran = 0 for item in provenance_items: if item.names[-1] == self._TIMER_TICK_OVERRUN: diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py index b7e1db0316..120148ac59 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py @@ -133,24 +133,15 @@ def get_profile_data(self, transceiver, placement): self.PROFILE_TAG_LABELS, transceiver, placement) @overrides(ProvidesProvenanceDataFromMachineImpl. 
- get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # pylint: disable=too-many-locals - provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - + _get_extra_provenance_items) + def _get_extra_provenance_items( + self, label, location, names, provenance_data): n_times_tdma_fell_behind = provenance_data[ self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSED_SLOTS.value] + x, y, p = location - label, x, y, p, names = self._get_placement_details(placement) - - provenance_items.append( - self._app_vertex.get_tdma_provenance_item( - names, x, y, p, n_times_tdma_fell_behind)) - return provenance_items + yield self._app_vertex.get_tdma_provenance_item( + names, x, y, p, n_times_tdma_fell_behind) @overrides(AbstractHasAssociatedBinary.get_binary_file_name) def get_binary_file_name(self): diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py index 702a273dc1..959318718e 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py @@ -87,75 +87,51 @@ def resources_required(self): return self.__resources @overrides(ProvidesProvenanceDataFromMachineImpl. - get_provenance_data_from_machine) - def get_provenance_data_from_machine(self, transceiver, placement): - # pylint: disable=too-many-locals - provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - - n_packets_received = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_RECEIVED.value] - n_packets_processed = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_PROCESSED.value] - n_packets_added = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_ADDED.value] - n_packets_sent = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_SENT.value] - n_buffer_overflows = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_BUFFER_OVERFLOWS.value] - n_delays = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_DELAYS.value] - n_times_tdma_fell_behind = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_TIMES_TDMA_FELL_BEHIND.value] - - label, x, y, p, names = self._get_placement_details(placement) + _get_extra_provenance_items) + def _get_extra_provenance_items( + self, label, location, names, provenance_data): + (n_received, n_processed, n_added, n_sent, n_overflows, n_delays, + n_times_tdma_fell_behind) = provenance_data + + x, y, p = location # translate into provenance data items - provenance_items.append(ProvenanceDataItem( - self._add_name(names, "Number_of_packets_received"), - n_packets_received)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, "Number_of_packets_processed"), - n_packets_processed, - report=n_packets_received != n_packets_processed, + yield ProvenanceDataItem( + self._add_name(names, "Number_of_packets_received"), n_received) + yield ProvenanceDataItem( + self._add_name(names, "Number_of_packets_processed"), n_processed, + report=(n_received != n_processed), message=( "The delay extension {} on {}, {}, {} only 
processed {} of {}" " received packets. This could indicate a fault.".format( - label, x, y, p, n_packets_processed, n_packets_received)))) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_processed, n_received))) + yield ProvenanceDataItem( self._add_name(names, "Number_of_packets_added_to_delay_slot"), - n_packets_added, - report=n_packets_added != n_packets_processed, + n_added, + report=(n_added != n_processed), message=( "The delay extension {} on {}, {}, {} only added {} of {}" " processed packets. This could indicate a routing or" " filtering fault".format( - label, x, y, p, n_packets_added, n_packets_processed)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, "Number_of_packets_sent"), - n_packets_sent)) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_added, n_processed))) + yield ProvenanceDataItem( + self._add_name(names, "Number_of_packets_sent"), n_sent) + yield ProvenanceDataItem( self._add_name(names, "Times_the_input_buffer_lost_packets"), - n_buffer_overflows, - report=n_buffer_overflows > 0, + n_overflows, + report=(n_overflows > 0), message=( "The input buffer for {} on {}, {}, {} lost packets on {} " "occasions. This is often a sign that the system is running " "too quickly for the number of neurons per core. Please " "increase the timer_tic or time_scale_factor or decrease the " "number of neurons per core.".format( - label, x, y, p, n_buffer_overflows)))) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_overflows))) + yield ProvenanceDataItem( self._add_name(names, "Number_of_times_delayed_to_spread_traffic"), - n_delays)) - provenance_items.append( - self._app_vertex.get_tdma_provenance_item( - names, x, y, p, n_times_tdma_fell_behind)) - - return provenance_items + n_delays) + yield self._app_vertex.get_tdma_provenance_item( + names, x, y, p, n_times_tdma_fell_behind) @overrides(MachineVertex.get_n_keys_for_partition) def get_n_keys_for_partition(self, _partition): From 64afb08220873c81476e85d839e735e22f3fb37e Mon Sep 17 00:00:00 2001 From: Donal Fellows Date: Tue, 27 Oct 2020 12:14:22 +0000 Subject: [PATCH 2/8] Improving provenance collection for PMV --- .../neuron/abstract_population_vertex.py | 2 +- .../neuron/population_machine_vertex.py | 199 ++++++++---------- 2 files changed, 90 insertions(+), 111 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 4b85f1767f..969a94adaa 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -290,7 +290,7 @@ def _get_sdram_usage_for_atoms(self, vertex_slice, graph): self._get_sdram_usage_for_neuron_params(vertex_slice) + self._neuron_recorder.get_static_sdram_usage(vertex_slice) + PopulationMachineVertex.get_provenance_data_size( - len(PopulationMachineVertex.EXTRA_PROVENANCE_DATA_ENTRIES)) + + PopulationMachineVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS) + self.__synapse_manager.get_sdram_usage_in_bytes( vertex_slice, graph, self) + profile_utils.get_profile_region_size( diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 4c3fce3eca..8003db6c53 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. 
If not, see . -from enum import Enum +from enum import IntEnum from spinn_utilities.overrides import overrides from pacman.model.graphs.machine import MachineVertex from spinn_front_end_common.utilities.utility_objs import ProvenanceDataItem @@ -50,7 +50,7 @@ class PopulationMachineVertex( "__on_chip_generatable_size", "__drop_late_spikes"] - class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): + class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum): """ Entries for the provenance data generated by standard neuron \ models. """ @@ -72,24 +72,24 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): INVALID_MASTER_POP_HITS = 9 BIT_FIELD_FILTERED_COUNT = 10 N_REWIRES = 11 - # the number of packets that were dropped as they arrived too late - # to be processed + #: The number of packets that were dropped as they arrived too late + #: to be processed N_LATE_SPIKES = 12 - # the max filled size of the input buffer + #: The max filled size of the input buffer INPUT_BUFFER_FILLED_SIZE = 13 - # the number of tdma misses + #: The number of TDMA misses TDMA_MISSES = 14 SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated" SATURATION_COUNT_MESSAGE = ( - "The weights from the synapses for {} on {}, {}, {} saturated " + "The weights from the synapses for {} on {}:{}:{} saturated " "{} times. If this causes issues you can increase the " "spikes_per_second and / or ring_buffer_sigma " "values located within the .spynnaker.cfg file.") INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" INPUT_BUFFER_FULL_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " + "The input buffer for {} on {}:{}:{} lost packets on {} " "occasions. This is often a sign that the system is running " "too quickly for the number of neurons per core. Please " "increase the timer_tic or time_scale_factor or decrease the " @@ -102,23 +102,45 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): SATURATED_PLASTIC_WEIGHTS_NAME = ( "Times_plastic_synaptic_weights_have_saturated") SATURATED_PLASTIC_WEIGHTS_MESSAGE = ( - "The weights from the plastic synapses for {} on {}, {}, {} " + "The weights from the plastic synapses for {} on {}:{}:{} " "saturated {} times. If this causes issue increase the " "spikes_per_second and / or ring_buffer_sigma values located " "within the .spynnaker.cfg file.") _N_LATE_SPIKES_NAME = "Number_of_late_spikes" _N_LATE_SPIKES_MESSAGE_DROP = ( - "{} packets from {} on {}, {}, {} were dropped from the input buffer, " + "{} packets from {} on {}:{}:{} were dropped from the input buffer, " "because they arrived too late to be processed in a given time step. " "Try increasing the time_scale_factor located within the " ".spynnaker.cfg file or in the pynn.setup() method.") _N_LATE_SPIKES_MESSAGE_NO_DROP = ( - "{} packets from {} on {}, {}, {} arrived too late to be processed in" + "{} packets from {} on {}:{}:{} arrived too late to be processed in" " a given time step. " "Try increasing the time_scale_factor located within the " ".spynnaker.cfg file or in the pynn.setup() method.") + _GHOST_SEARCH_MESSAGE = ( + "The number of failed population table searches for {} on {}:{}:{} " + "was {}. If this number is large relative to the predicted incoming " + "spike rate, try increasing source and target neurons per core") + _INVALID_POP_MESSAGE = ( + "There were {} keys which were received by core {}:{}:{} which had no " + "master pop entry for it. 
This is a error, which most likely stems " + "from bad routing.") + _FILTERED_PACKETS_MESSAGE = ( + "There were {} packets received by {}:{}:{} that were filtered by " + "the Bitfield filterer on the core. These packets were having to be " + "stored and processed on core, which means the core may not be " + "running as efficiently as it could. Please adjust the network or " + "the mapping so that these packets are filtered in the router to " + "improve performance.") + _BITFIELDS_UNREAD_MESSAGE = ( + "The filter for stopping redundant DMA's couldn't be fully filled in; " + "it failed to read {} entries, which means it required a max of {} " + "extra bytes of DTCM (assuming cores have at most 255 neurons). Try " + "reducing neurons per core, or size of buffers, or neuron params per " + "neuron, etc.") + _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer" _PROFILE_TAG_LABELS = { @@ -217,7 +239,7 @@ def _provenance_region_id(self): @property @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) def _n_additional_data_items(self): - return len(PopulationMachineVertex.EXTRA_PROVENANCE_DATA_ENTRIES) + return len(self.EXTRA_PROVENANCE_DATA_ENTRIES) @overrides(AbstractRecordable.is_recording) def is_recording(self): @@ -229,138 +251,95 @@ def get_provenance_data_from_machine(self, transceiver, placement): provenance_data = self._read_provenance_data(transceiver, placement) provenance_items = self._read_basic_provenance_items( provenance_data, placement) - provenance_data = self._get_remaining_provenance_data_items( - provenance_data) - # This is why we have to override the public method + # This is why we have to override the superclass public method times_timer_tic_overran = 0 for item in provenance_items: if item.names[-1] == self._TIMER_TICK_OVERRUN: times_timer_tic_overran = item.value - n_saturations = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.SATURATION_COUNT.value] - n_buffer_overflows = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.BUFFER_OVERFLOW_COUNT.value] - n_pre_synaptic_events = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.PRE_SYNAPTIC_EVENT_COUNT.value] - last_timer_tick = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.CURRENT_TIMER_TIC.value] - n_plastic_saturations = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES. 
- PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value] - n_ghost_searches = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.GHOST_POP_TABLE_SEARCHES.value] - failed_to_read_bit_fields = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.FAILED_TO_READ_BIT_FIELDS.value] - dma_completes = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.DMA_COMPLETES.value] - spike_processing_count = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.SPIKE_PROGRESSING_COUNT.value] - invalid_master_pop_hits = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.INVALID_MASTER_POP_HITS.value] - n_packets_filtered_by_bit_field_filter = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.BIT_FIELD_FILTERED_COUNT.value] - n_rewires = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_REWIRES.value] - n_late_packets = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.N_LATE_SPIKES.value] - input_buffer_max_filled_size = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.INPUT_BUFFER_FILLED_SIZE.value] - tdma_misses = provenance_data[ - self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSES.value] + # translate into provenance data items + provenance_items.extend(self.__local_prov_items( + placement, provenance_data, times_timer_tic_overran)) + return provenance_items + def __local_prov_items(self, placement, provenance_data, tic_overruns): + """ + :param ~.Placement placement: + :param list(int) provenance_data: + :param int tic_overruns: + :rtype: iterable(ProvenanceDataItem) + """ label, x, y, p, names = self._get_placement_details(placement) + (n_pre_synaptic_events, n_saturations, n_buffer_overflows, + last_timer_tick, n_plastic_saturations, n_ghost_searches, + failed_to_read_bit_fields, dma_completes, spike_processing_count, + invalid_master_pop_hits, n_packets_filtered, n_rewires, + n_late_packets, input_buffer_max_filled_size, tdma_misses) = \ + self._get_remaining_provenance_data_items(provenance_data) # translate into provenance data items - provenance_items.append(ProvenanceDataItem( + yield ProvenanceDataItem( self._add_name(names, self.SATURATION_COUNT_NAME), n_saturations, report=n_saturations > 0, message=self.SATURATION_COUNT_MESSAGE.format( - label, x, y, p, n_saturations))) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_saturations)) + yield ProvenanceDataItem( self._add_name(names, self.INPUT_BUFFER_FULL_NAME), n_buffer_overflows, report=n_buffer_overflows > 0, message=self.INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows))) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_buffer_overflows)) + yield ProvenanceDataItem( self._add_name(names, self.TOTAL_PRE_SYNAPTIC_EVENT_NAME), - n_pre_synaptic_events)) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.LAST_TIMER_TICK_NAME), - last_timer_tick)) - provenance_items.append(ProvenanceDataItem( + n_pre_synaptic_events) + yield ProvenanceDataItem( + self._add_name(names, self.LAST_TIMER_TICK_NAME), last_timer_tick) + yield ProvenanceDataItem( self._add_name(names, self.SATURATED_PLASTIC_WEIGHTS_NAME), n_plastic_saturations, report=n_plastic_saturations > 0, message=self.SATURATED_PLASTIC_WEIGHTS_MESSAGE.format( - label, x, y, p, n_plastic_saturations))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.N_RE_WIRES_NAME), n_rewires)) - provenance_items.append(ProvenanceDataItem( + label, x, y, p, n_plastic_saturations)) + yield ProvenanceDataItem( + self._add_name(names, self.N_RE_WIRES_NAME), n_rewires) + yield 
ProvenanceDataItem( self._add_name(names, self.GHOST_SEARCHES), n_ghost_searches, report=n_ghost_searches > 0, - message=( - "The number of failed population table searches for {} on {}," - " {}, {} was {}. If this number is large relative to the " - "predicted incoming spike rate, try increasing source and " - "target neurons per core".format( - label, x, y, p, n_ghost_searches)))) - provenance_items.append(ProvenanceDataItem( + message=self._GHOST_SEARCH_MESSAGE.format( + label, x, y, p, n_ghost_searches)) + yield ProvenanceDataItem( self._add_name(names, self.BIT_FIELDS_NOT_READ), failed_to_read_bit_fields, report=False, - message=( - "The filter for stopping redundant DMA's couldn't be fully " - "filled in, it failed to read {} entries, which means it " - "required a max of {} extra bytes of DTCM (assuming cores " - "have at max 255 neurons. Try reducing neurons per core, or " - "size of buffers, or neuron params per neuron etc.".format( - failed_to_read_bit_fields, - failed_to_read_bit_fields * - self.WORDS_TO_COVER_256_ATOMS)))) - provenance_items.append(ProvenanceDataItem( - self._add_name(names, self.DMA_COMPLETE), dma_completes)) - provenance_items.append(ProvenanceDataItem( + message=self._BITFIELDS_UNREAD_MESSAGE.format( + failed_to_read_bit_fields, + failed_to_read_bit_fields * self.WORDS_TO_COVER_256_ATOMS)) + yield ProvenanceDataItem( + self._add_name(names, self.DMA_COMPLETE), dma_completes) + yield ProvenanceDataItem( self._add_name(names, self.SPIKES_PROCESSED), - spike_processing_count)) - provenance_items.append(ProvenanceDataItem( + spike_processing_count) + yield ProvenanceDataItem( self._add_name(names, self.INVALID_MASTER_POP_HITS), invalid_master_pop_hits, report=invalid_master_pop_hits > 0, - message=( - "There were {} keys which were received by core {}:{}:{} which" - " had no master pop entry for it. This is a error, which most " - "likely strives from bad routing.".format( - invalid_master_pop_hits, x, y, p)))) - provenance_items.append((ProvenanceDataItem( + message=self._INVALID_POP_MESSAGE.format( + invalid_master_pop_hits, x, y, p)) + yield ProvenanceDataItem( self._add_name(names, self.BIT_FIELD_FILTERED_PACKETS), - n_packets_filtered_by_bit_field_filter, - report=( - n_packets_filtered_by_bit_field_filter > 0 and ( - n_buffer_overflows > 0 or - times_timer_tic_overran > 0)), - message=( - "There were {} packets received by {}:{}:{} that were " - "filtered by the Bitfield filterer on the core. These packets " - "were having to be stored and processed on core, which means " - "the core may not be running as efficiently as it could. 
" - "Please adjust the network or the mapping so that these " - "packets are filtered in the router to improve " - "performance.".format( - n_packets_filtered_by_bit_field_filter, x, y, p))))) + n_packets_filtered, report=(n_packets_filtered > 0 and ( + n_buffer_overflows > 0 or tic_overruns > 0)), + message=self._FILTERED_PACKETS_MESSAGE.format( + n_packets_filtered, x, y, p)) late_message = ( self._N_LATE_SPIKES_MESSAGE_DROP if self.__drop_late_spikes else self._N_LATE_SPIKES_MESSAGE_NO_DROP) - provenance_items.append(ProvenanceDataItem( + yield ProvenanceDataItem( self._add_name(names, self._N_LATE_SPIKES_NAME), n_late_packets, report=n_late_packets > 0, - message=late_message.format(n_late_packets, label, x, y, p))) - - provenance_items.append(ProvenanceDataItem( + message=late_message.format(n_late_packets, label, x, y, p)) + yield ProvenanceDataItem( self._add_name(names, self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME), - input_buffer_max_filled_size, report=False)) - - provenance_items.append(self._app_vertex.get_tdma_provenance_item( - names, x, y, p, tdma_misses)) - return provenance_items + input_buffer_max_filled_size) + yield self._app_vertex.get_tdma_provenance_item( + names, x, y, p, tdma_misses) @overrides(AbstractReceiveBuffersToHost.get_recorded_region_ids) def get_recorded_region_ids(self): From e3f9177b504cd84335e89525b98c3fa9e6167990 Mon Sep 17 00:00:00 2001 From: Donal Fellows Date: Fri, 30 Oct 2020 17:20:23 +0000 Subject: [PATCH 3/8] Tweaking provenance generation --- .../connectors/abstract_connector.py | 6 +-- .../abstract_timing_dependence.py | 6 +-- ...timing_dependence_pfister_spike_triplet.py | 3 +- .../timing_dependence_recurrent.py | 6 ++- .../timing_dependence_spike_nearest_pair.py | 4 +- .../timing_dependence_spike_pair.py | 3 +- .../timing_dependence_vogels_2011.py | 4 +- .../abstract_weight_dependence.py | 6 +-- .../weight_dependence_additive.py | 3 +- .../weight_dependence_additive_triplet.py | 3 +- .../weight_dependence_multiplicative.py | 3 +- .../abstract_synapse_dynamics.py | 4 +- .../synapse_dynamics/synapse_dynamics_stdp.py | 38 ++++++++----------- 13 files changed, 45 insertions(+), 44 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 3c7bcaa2c2..5470e2e226 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -490,14 +490,14 @@ def get_provenance_data(self, synapse_info): """ :param SynapseInformation synapse_info: :rtype: - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ name = "connector_{}_{}_{}".format( synapse_info.pre_population.label, synapse_info.post_population.label, self.__class__.__name__) # Convert to native Python integer; provenance system assumption ncd = self.__n_clipped_delays.item() - return [ProvenanceDataItem( + yield ProvenanceDataItem( [name, "Times_synaptic_delays_got_clipped"], ncd, report=ncd > 0, message=( "The delays in the connector {} from {} to {} was clipped " @@ -506,7 +506,7 @@ def get_provenance_data(self, synapse_info): "timestep".format( self.__class__.__name__, synapse_info.pre_population.label, synapse_info.post_population.label, self.__min_delay, - ncd)))] + ncd))) @property def safe(self): diff --git 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index a11db4e352..dfd8d9e6da 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -89,8 +89,8 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str pre_population_label: label of pre. :param str post_population_label: label of post. - :rtype: \ - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + :rtype: + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index 1052c4659a..5de3d48e50 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -45,6 +45,7 @@ class TimingDependencePfisterSpikeTriplet(AbstractTimingDependence): "__tau_x_data", "__tau_y", "__tau_y_data"] + __PARAM_NAMES = ('tau_plus', 'tau_minus', 'tau_x', 'tau_y') # noinspection PyPep8Naming def __init__(self, tau_plus, tau_minus, tau_x, tau_y): @@ -162,4 +163,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus', 'tau_x', 'tau_y'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index 1e6d1e9277..c18bcc7902 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -37,6 +37,9 @@ class TimingDependenceRecurrent(AbstractTimingDependence): "__mean_post_window", "__mean_pre_window", "__synapse_structure"] + __PARAM_NAMES = ( + 'accumulator_depression', 'accumulator_potentiation', + 'mean_pre_window', 'mean_post_window', 'dual_fsm') default_parameters = { 'accumulator_depression': -6, 'accumulator_potentiation': 6, @@ -151,5 +154,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['accumulator_depression', 'accumulator_potentiation', - 'mean_pre_window', 'mean_post_window', 'dual_fsm'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index f58d2a826d..b16425f8da 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -37,7 +37,7 @@ class TimingDependenceSpikeNearestPair(AbstractTimingDependence): "__tau_minus_data", "__tau_plus", "__tau_plus_data"] - + __PARAM_NAMES = 
('tau_plus', 'tau_minus') default_parameters = {'tau_plus': 20.0, 'tau_minus': 20.0} def __init__(self, tau_plus=default_parameters['tau_plus'], @@ -128,4 +128,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 40cb9ee532..e0a93e40f8 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -36,6 +36,7 @@ class TimingDependenceSpikePair(AbstractTimingDependence): "__tau_minus_data", "__tau_plus", "__tau_plus_data"] + __PARAM_NAMES = ('tau_plus', 'tau_minus') def __init__(self, tau_plus=20.0, tau_minus=20.0): r""" @@ -124,4 +125,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['tau_plus', 'tau_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index c75f8672b0..6af1a37152 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -37,7 +37,7 @@ class TimingDependenceVogels2011(AbstractTimingDependence): "__synapse_structure", "__tau", "__tau_data"] - + __PARAM_NAMES = ('alpha', 'tau') default_parameters = {'tau': 20.0} def __init__(self, alpha, tau=default_parameters['tau']): @@ -128,4 +128,4 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): - return ['alpha', 'tau'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index 109ae9ce86..bf367e6ea1 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -28,11 +28,11 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str pre_population_label: label of pre. :param str post_population_label: label of post. 
:return: the provenance data of the weight dependency - :rtype: \ - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) + :rtype: + iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] @abstractmethod def get_parameter_names(self): diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index 24099adaf4..27eaed6be9 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -30,6 +30,7 @@ class WeightDependenceAdditive( __slots__ = [ "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A_plus', 'A_minus') # noinspection PyPep8Naming def __init__(self, w_min=0.0, w_max=1.0): @@ -116,4 +117,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A_plus', 'A_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index e2a112411b..7c9f563adc 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -31,6 +31,7 @@ class WeightDependenceAdditiveTriplet( "__a3_plus", "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A3_plus', 'A3_minus') default_parameters = {'w_min': 0.0, 'w_max': 1.0, 'A3_plus': 0.01, 'A3_minus': 0.01} @@ -153,4 +154,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A3_plus', 'A3_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index 5b5ccce85e..69e6ebd64f 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -29,6 +29,7 @@ class WeightDependenceMultiplicative( __slots__ = [ "__w_max", "__w_min"] + __PARAM_NAMES = ('w_min', 'w_max', 'A_plus', 'A_minus') def __init__(self, w_min=0.0, w_max=1.0): """ @@ -113,4 +114,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max', 'A_plus', 'A_minus'] + return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index d373137f3f..4ec8a9688a 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -130,9 +130,11 @@ def get_provenance_data(self, pre_population_label, post_population_label): :param str pre_population_label: :param str post_population_label: + :rtype: + 
iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ # pylint: disable=unused-argument - return list() + return [] def get_delay_maximum(self, connector, synapse_info): """ Get the maximum delay for the synapses diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index ee95d31ab0..d92f8d8c2f 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -20,14 +20,15 @@ from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_SHORT) from spynnaker.pyNN.models.abstract_models import AbstractSettable +from spynnaker.pyNN.exceptions import ( + InvalidParameterType, SynapticConfigurationException) +from spynnaker.pyNN.utilities.utility_calls import get_n_bits from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics -from .abstract_synapse_dynamics_structural \ - import AbstractSynapseDynamicsStructural +from .abstract_synapse_dynamics import AbstractSynapseDynamics +from .abstract_synapse_dynamics_structural import ( + AbstractSynapseDynamicsStructural) from .abstract_generate_on_machine import ( AbstractGenerateOnMachine, MatrixGeneratorID) -from spynnaker.pyNN.exceptions import InvalidParameterType,\ - SynapticConfigurationException -from spynnaker.pyNN.utilities.utility_calls import get_n_bits # How large are the time-stamps stored with each event TIME_STAMP_BYTES = BYTES_PER_WORD @@ -473,29 +474,20 @@ def get_weight_maximum(self, connector, synapse_info): # the weight dependence return max(w_max, self.__weight_dependence.weight_maximum) + @overrides(AbstractSynapseDynamics.get_provenance_data) def get_provenance_data(self, pre_population_label, post_population_label): - """ - :param str pre_population_label: - :param str post_population_label: - :rtype: \ - list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) - """ - prov_data = list() - if self.__timing_dependence is not None: - prov_data.extend(self.__timing_dependence.get_provenance_data( - pre_population_label, post_population_label)) - if self.__weight_dependence is not None: - prov_data.extend(self.__weight_dependence.get_provenance_data( - pre_population_label, post_population_label)) - return prov_data + for item in self.__timing_dependence.get_provenance_data( + pre_population_label, post_population_label): + yield item + for item in self.__weight_dependence.get_provenance_data( + pre_population_label, post_population_label): + yield item @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) def get_parameter_names(self): names = ['weight', 'delay'] - if self.__timing_dependence is not None: - names.extend(self.__timing_dependence.get_parameter_names()) - if self.__weight_dependence is not None: - names.extend(self.__weight_dependence.get_parameter_names()) + names.extend(self.__timing_dependence.get_parameter_names()) + names.extend(self.__weight_dependence.get_parameter_names()) return names @overrides(AbstractPlasticSynapseDynamics.get_max_synapses) From d2daf2ab3ca7e1356559aeb5569260d5be0059e9 Mon Sep 17 00:00:00 2001 From: Donal Fellows Date: Thu, 29 Apr 2021 16:42:12 +0100 Subject: [PATCH 4/8] Standardise the human-readable label for provenance purposes --- .../machine_munich_motor_device.py | 18 +- .../neuron/population_machine_vertex.py | 171 +++++++----------- .../spike_source_poisson_machine_vertex.py | 6 +- 
.../delays/delay_extension_machine_vertex.py | 139 +++++--------- 4 files changed, 125 insertions(+), 209 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py index 53c9b8ffe0..7244db2962 100644 --- a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py @@ -68,12 +68,6 @@ class MachineMunichMotorDevice( #: The name of the provenance item saying that packets were lost. INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" - _INPUT_BUFFER_FULL_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") def __init__( self, speed, sample_time, update_time, delay_time, @@ -130,15 +124,17 @@ def _n_additional_data_items(self): @overrides( ProvidesProvenanceDataFromMachineImpl._get_extra_provenance_items) def _get_extra_provenance_items( - self, label, location, names, provenance_data): + self, label, names, provenance_data): n_buffer_overflows, = provenance_data - x, y, p = location yield ProvenanceDataItem( names + [self.INPUT_BUFFER_FULL_NAME], n_buffer_overflows, - report=(n_buffer_overflows > 0), - message=self._INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows)) + (n_buffer_overflows > 0), + f"The input buffer for {label} lost packets on " + f"{n_buffer_overflows} occasions. " + "This is often a sign that the system is running too quickly for " + "the number of neurons per core. Please increase the timer_tic " + "or time_scale_factor or decrease the number of neurons per core.") @inject_items({ "routing_info": "MemoryRoutingInfos", diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index fb29f62f53..d5c0d2063e 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -96,74 +96,13 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum): N_BACKGROUND_OVERLOADS = 16 SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated" - _SATURATION_COUNT_MESSAGE = ( - "The weights from the synapses for {} on {}:{}:{} saturated " - "{} times. If this causes issues you can increase the " - "spikes_per_second and / or ring_buffer_sigma " - "values located within the .spynnaker.cfg file.") - INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets" - _INPUT_BUFFER_FULL_MESSAGE = ( - "The input buffer for {} on {}:{}:{} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") - TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events" LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to" N_RE_WIRES_NAME = "Number_of_rewires" - SATURATED_PLASTIC_WEIGHTS_NAME = ( "Times_plastic_synaptic_weights_have_saturated") - _SATURATED_PLASTIC_WEIGHTS_MESSAGE = ( - "The weights from the plastic synapses for {} on {}:{}:{} " - "saturated {} times. 
If this causes issue increase the " - "spikes_per_second and / or ring_buffer_sigma values located " - "within the .spynnaker.cfg file.") - _N_LATE_SPIKES_NAME = "Number_of_late_spikes" - _N_LATE_SPIKES_MESSAGE_DROP = ( - "{} packets from {} on {}:{}:{} were dropped from the input buffer, " - "because they arrived too late to be processed in a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - _N_LATE_SPIKES_MESSAGE_NO_DROP = ( - "{} packets from {} on {}:{}:{} arrived too late to be processed in" - " a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - - _GHOST_SEARCH_MESSAGE = ( - "The number of failed population table searches for {} on {}:{}:{} " - "was {}. If this number is large relative to the predicted incoming " - "spike rate, try increasing source and target neurons per core") - _INVALID_POP_MESSAGE = ( - "There were {} keys which were received by core {}:{}:{} which had no " - "master pop entry for it. This is a error, which most likely stems " - "from bad routing.") - _FILTERED_PACKETS_MESSAGE = ( - "There were {} packets received by {}:{}:{} that were filtered by " - "the Bitfield filterer on the core. These packets were having to be " - "stored and processed on core, which means the core may not be " - "running as efficiently as it could. Please adjust the network or " - "the mapping so that these packets are filtered in the router to " - "improve performance.") - _BITFIELDS_UNREAD_MESSAGE = ( - "The filter for stopping redundant DMA's couldn't be fully filled in; " - "it failed to read {} entries, which means it required a max of {} " - "extra bytes of DTCM (assuming cores have at most 255 neurons). Try " - "reducing neurons per core, or size of buffers, or neuron params per " - "neuron, etc.") - _BACKGROUND_OVERLOADS_MESSAGE = ( - "On {} on {}, {}, {}, the background queue overloaded {} times. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - _BACKGROUND_MAX_QUEUED_MESSAGE = ( - "A maximum of {} background tasks were queued on {} on {}, {}, {}. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer" _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" @@ -179,8 +118,8 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum): _WORDS_TO_COVER_256_ATOMS = 8 # provenance data items - BIT_FIELD_FILTERED_PACKETS = \ - "How many packets were filtered by the bitfield filterer." 
+ BIT_FIELD_FILTERED_PACKETS = ( + "How many packets were filtered by the bitfield filterer.") INVALID_MASTER_POP_HITS = "Invalid Master Pop hits" SPIKES_PROCESSED = "how many spikes were processed" DMA_COMPLETE = "DMA's that were completed" @@ -269,18 +208,18 @@ def _n_additional_data_items(self): get_provenance_data_from_machine) def get_provenance_data_from_machine(self, transceiver, placement): provenance_data = self._read_provenance_data(transceiver, placement) - provenance_items = self._read_basic_provenance_items( - provenance_data, placement) + provenance_items = list(self._read_basic_provenance_items( + provenance_data, placement)) # This is why we have to override the superclass public method - times_timer_tic_overran = 0 + tic_overruns = 0 for item in provenance_items: if item.names[-1] == self._TIMER_TICK_OVERRUN: - times_timer_tic_overran = item.value + tic_overruns = item.value # translate into provenance data items provenance_items.extend(self.__local_prov_items( - placement, provenance_data, times_timer_tic_overran)) + placement, provenance_data, tic_overruns)) return provenance_items def __local_prov_items(self, placement, provenance_data, tic_overruns): @@ -290,7 +229,7 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns): :param int tic_overruns: :rtype: iterable(ProvenanceDataItem) """ - label, x, y, p, names = self._get_placement_details(placement) + label, names = self._get_placement_details(placement) (n_pre_synaptic_events, n_saturations, n_buffer_overflows, last_timer_tick, n_plastic_saturations, n_ghost_searches, failed_to_read_bit_fields, dma_completes, spike_processing_count, @@ -302,14 +241,19 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns): # translate into provenance data items yield ProvenanceDataItem( self._add_name(names, self.SATURATION_COUNT_NAME), - n_saturations, report=(n_saturations > 0), - message=self._SATURATION_COUNT_MESSAGE.format( - label, x, y, p, n_saturations)) + n_saturations, (n_saturations > 0), + f"The weights from the synapses for {label} saturated " + f"{n_saturations} times. If this causes issues you can increase " + "the spikes_per_second and / or ring_buffer_sigma values located " + "within the .spynnaker.cfg file.") yield ProvenanceDataItem( self._add_name(names, self.INPUT_BUFFER_FULL_NAME), - n_buffer_overflows, report=(n_buffer_overflows > 0), - message=self._INPUT_BUFFER_FULL_MESSAGE.format( - label, x, y, p, n_buffer_overflows)) + n_buffer_overflows, (n_buffer_overflows > 0), + f"The input buffer for {label} lost packets on " + f"{n_buffer_overflows} occasions. This is often a sign that the " + "system is running too quickly for the number of neurons per " + "core. Please increase the timer_tic or time_scale_factor or " + "decrease the number of neurons per core.") yield ProvenanceDataItem( self._add_name(names, self.TOTAL_PRE_SYNAPTIC_EVENT_NAME), n_pre_synaptic_events) @@ -317,22 +261,30 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns): self._add_name(names, self.LAST_TIMER_TICK_NAME), last_timer_tick) yield ProvenanceDataItem( self._add_name(names, self.SATURATED_PLASTIC_WEIGHTS_NAME), - n_plastic_saturations, report=(n_plastic_saturations > 0), - message=self._SATURATED_PLASTIC_WEIGHTS_MESSAGE.format( - label, x, y, p, n_plastic_saturations)) + n_plastic_saturations, (n_plastic_saturations > 0), + f"The weights from the plastic synapses for {label} saturated " + f"{n_plastic_saturations} times. 
If this causes issue increase " + "the spikes_per_second and / or ring_buffer_sigma values located " + "within the .spynnaker.cfg file.") yield ProvenanceDataItem( self._add_name(names, self.N_RE_WIRES_NAME), n_rewires) yield ProvenanceDataItem( self._add_name(names, self.GHOST_SEARCHES), n_ghost_searches, - report=(n_ghost_searches > 0), - message=self._GHOST_SEARCH_MESSAGE.format( - label, x, y, p, n_ghost_searches)) + (n_ghost_searches > 0), + f"The number of failed population table searches for {label} was " + f"{n_ghost_searches}. If this number is large relative to the " + "predicted incoming spike rate, try increasing source and target " + "neurons per core") yield ProvenanceDataItem( self._add_name(names, self.BIT_FIELDS_NOT_READ), - failed_to_read_bit_fields, report=False, - message=self._BITFIELDS_UNREAD_MESSAGE.format( - failed_to_read_bit_fields, - failed_to_read_bit_fields * self._WORDS_TO_COVER_256_ATOMS)) + failed_to_read_bit_fields, False, + f"On {label}, the filter for stopping redundant DMAs couldn't be " + f"fully filled in; it failed to read {failed_to_read_bit_fields} " + "entries, which means it required a max of " + f"{failed_to_read_bit_fields * self._WORDS_TO_COVER_256_ATOMS} " + "extra bytes of DTCM (assuming cores have at most 255 neurons). " + "Try reducing neurons per core, or size of buffers, or neuron " + "params per neuron, etc.") yield ProvenanceDataItem( self._add_name(names, self.DMA_COMPLETE), dma_completes) yield ProvenanceDataItem( @@ -340,41 +292,56 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns): spike_processing_count) yield ProvenanceDataItem( self._add_name(names, self.INVALID_MASTER_POP_HITS), - invalid_master_pop_hits, report=(invalid_master_pop_hits > 0), - message=self._INVALID_POP_MESSAGE.format( - invalid_master_pop_hits, x, y, p)) + invalid_master_pop_hits, (invalid_master_pop_hits > 0), + f"On {label}, there were {invalid_master_pop_hits} keys received " + "that had no master pop entry for them. This is an error, which " + "most likely stems from bad routing.") yield ProvenanceDataItem( self._add_name(names, self.BIT_FIELD_FILTERED_PACKETS), - n_packets_filtered, report=(n_packets_filtered > 0 and ( + n_packets_filtered, (n_packets_filtered > 0 and ( n_buffer_overflows > 0 or tic_overruns > 0)), - message=self._FILTERED_PACKETS_MESSAGE.format( - n_packets_filtered, x, y, p)) + f"On {label}, there were {n_packets_filtered} packets received " + "that were filtered by the bit-field filterer on the core. These " + "packets were having to be stored and processed on core, which " + "means the core may not be running as efficiently as it should. " + "Please adjust the network or the mapping so that these packets " + "are filtered in the router to improve performance.") late_message = ( - self._N_LATE_SPIKES_MESSAGE_DROP if self.__drop_late_spikes - else self._N_LATE_SPIKES_MESSAGE_NO_DROP) + f"On {label}, {n_late_packets} packets were dropped from the " + "input buffer, because they arrived too late to be processed in " + "a given time step. Try increasing the time_scale_factor located " + "within the .spynnaker.cfg file or in the pynn.setup() method." + if self.__drop_late_spikes else + f"On {label}, {n_late_packets} packets arrived too late to be " + "processed in a given time step. 
Try increasing the " + "time_scale_factor located within the .spynnaker.cfg file or in " + "the pynn.setup() method.") yield ProvenanceDataItem( self._add_name(names, self._N_LATE_SPIKES_NAME), - n_late_packets, report=(n_late_packets > 0), - message=late_message.format(n_late_packets, label, x, y, p)) + n_late_packets, (n_late_packets > 0), late_message) yield ProvenanceDataItem( self._add_name(names, self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME), input_buffer_max_filled_size, report=False) yield self._app_vertex.get_tdma_provenance_item( - names, x, y, p, tdma_misses) + names, label, tdma_misses) yield ProvenanceDataItem( self._add_name(names, self._BACKGROUND_MAX_QUEUED_NAME), - max_background_queued, report=(max_background_queued > 1), - message=self._BACKGROUND_MAX_QUEUED_MESSAGE.format( - max_background_queued, label, x, y, p)) + max_background_queued, (max_background_queued > 1), + f"On {label}, a maximum of {max_background_queued} background " + "tasks were queued, which can indicate a core overloading. Try " + "increasing the time_scale_factor located within the " + ".spynnaker.cfg file or in the pynn.setup() method.") yield ProvenanceDataItem( self._add_name(names, self._BACKGROUND_OVERLOADS_NAME), - n_background_overloads, report=(n_background_overloads > 0), - message=self._BACKGROUND_OVERLOADS_MESSAGE.format( - label, x, y, p, n_background_overloads)) + n_background_overloads, (n_background_overloads > 0), + f"On {label}, the background queue overloaded " + f"{n_background_overloads} times, which can indicate a core " + "overloading. Try increasing the time_scale_factor located " + "within the .spynnaker.cfg file or in the pynn.setup() method.") @overrides(AbstractReceiveBuffersToHost.get_recorded_region_ids) def get_recorded_region_ids(self): diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py index e6cbdc07a8..b179b86a6e 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py @@ -210,14 +210,12 @@ def get_profile_data(self, transceiver, placement): @overrides(ProvidesProvenanceDataFromMachineImpl. _get_extra_provenance_items) - def _get_extra_provenance_items( - self, label, location, names, provenance_data): + def _get_extra_provenance_items(self, label, names, provenance_data): n_times_tdma_fell_behind = provenance_data[ self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSED_SLOTS.value] - x, y, p = location yield self._app_vertex.get_tdma_provenance_item( - names, x, y, p, n_times_tdma_fell_behind) + names, label, n_times_tdma_fell_behind) @overrides(AbstractHasAssociatedBinary.get_binary_file_name) def get_binary_file_name(self): diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py index c79f4abb50..3dd2b2d9c7 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py @@ -71,78 +71,19 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): N_EXTRA_PROVENANCE_DATA_ENTRIES = len(EXTRA_PROVENANCE_DATA_ENTRIES) - _COUNT_SATURATION_MESSAGE = ( - "The delay extension {} has dropped {} packets because during " - "certain time steps a neuron was asked to spike more than 256 times. " - "This causes a saturation on the count tracker which is a uint8. 
" - "Reduce the packet rates, or modify the delay extension to have " - "larger counters.") - COUNT_SATURATION_NAME = "saturation_count" - - _INVALID_NEURON_IDS_MESSAGE = ( - "The delay extension {} has dropped {} packets because their " - "neuron id was not valid. This is likely a routing issue. " - "Please fix and try again") - INVALID_NEURON_ID_COUNT_NAME = "invalid_neuron_count" - - _PACKETS_DROPPED_FROM_INVALID_KEY_MESSAGE = ( - "The delay extension {} has dropped {} packets due to the packet " - "key being invalid. This is likely a routing issue. " - "Please fix and try again") - INVALID_KEY_COUNT_NAME = "invalid_key_count" - N_PACKETS_RECEIVED_NAME = "Number_of_packets_received" - N_PACKETS_PROCESSED_NAME = "Number_of_packets_processed" - - _MISMATCH_PROCESSED_FROM_RECEIVED_MESSAGE = ( - "The delay extension {} on {}, {}, {} only processed {} of {}" - " received packets. This could indicate a fault.") - MISMATCH_ADDED_FROM_PROCESSED_NAME = ( "Number_of_packets_added_to_delay_slot") - - _MISMATCH_ADDED_FROM_PROCESSED_MESSAGE = ( - "The delay extension {} on {}, {}, {} only added {} of {} processed " - "packets. This could indicate a routing or filtering fault") - N_PACKETS_SENT_NAME = "Number_of_packets_sent" - INPUT_BUFFER_LOST_NAME = "Times_the_input_buffer_lost_packets" - - _INPUT_BUFFER_LOST_MESSAGE = ( - "The input buffer for {} on {}, {}, {} lost packets on {} " - "occasions. This is often a sign that the system is running " - "too quickly for the number of neurons per core. Please " - "increase the timer_tic or time_scale_factor or decrease the " - "number of neurons per core.") - N_LATE_SPIKES_NAME = "Number_of_late_spikes" - N_LATE_SPIKES_MESSAGE_DROP = ( - "{} packets from {} on {}, {}, {} were dropped from the input buffer, " - "because they arrived too late to be processed in a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - N_LATE_SPIKES_MESSAGE_NO_DROP = ( - "{} packets from {} on {}, {}, {} arrived too late to be processed in" - " a given time step. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") - DELAYED_FOR_TRAFFIC_NAME = "Number_of_times_delayed_to_spread_traffic" BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" - _BACKGROUND_OVERLOADS_MESSAGE = ( - "On {} on {}, {}, {}, the background queue overloaded {} times. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" - _BACKGROUND_MAX_QUEUED_MESSAGE = ( - "A maximum of {} background tasks were queued on {} on {}, {}, {}. " - "Try increasing the time_scale_factor located within the " - ".spynnaker.cfg file or in the pynn.setup() method.") def __init__(self, resources_required, label, constraints=None, app_vertex=None, vertex_slice=None): @@ -183,73 +124,87 @@ def resources_required(self): @overrides(ProvidesProvenanceDataFromMachineImpl. 
                _get_extra_provenance_items)
-    def _get_extra_provenance_items(
-            self, label, location, names, provenance_data):
+    def _get_extra_provenance_items(self, label, names, provenance_data):
         (n_received, n_processed, n_added, n_sent, n_overflows, n_delays,
          n_tdma_behind, n_sat, n_bad_neuron, n_bad_keys, n_late_spikes,
          max_bg, n_bg_overloads) = provenance_data
-        x, y, p = location
-
         # translate into provenance data items
         yield ProvenanceDataItem(
             self._add_name(names, self.COUNT_SATURATION_NAME),
-            n_sat, report=(n_sat != 0),
-            message=self._COUNT_SATURATION_MESSAGE.format(
-                label, n_sat))
+            n_sat, (n_sat != 0),
+            f"The delay extension {label} has dropped {n_sat} packets because "
+            "during certain time steps a neuron was asked to spike more than "
+            "256 times. This causes a saturation on the count tracker which "
+            "is a uint8. Reduce the packet rates, or modify the delay "
+            "extension to have larger counters.")
         yield ProvenanceDataItem(
             self._add_name(names, self.INVALID_NEURON_ID_COUNT_NAME),
-            n_bad_neuron, report=(n_bad_neuron != 0),
-            message=self._INVALID_NEURON_IDS_MESSAGE.format(
-                label, n_bad_neuron))
+            n_bad_neuron, (n_bad_neuron != 0),
+            f"The delay extension {label} has dropped {n_bad_neuron} packets "
+            "because their neuron id was not valid. This is likely a routing "
+            "issue. Please fix and try again")
         yield ProvenanceDataItem(
             self._add_name(names, self.INVALID_KEY_COUNT_NAME),
-            n_bad_keys, report=(n_bad_keys != 0),
-            message=self._PACKETS_DROPPED_FROM_INVALID_KEY_MESSAGE.format(
-                label, n_bad_keys))
+            n_bad_keys, (n_bad_keys != 0),
+            f"The delay extension {label} has dropped {n_bad_keys} packets "
+            "due to the packet key being invalid. This is likely a routing "
+            "issue. Please fix and try again")
         yield ProvenanceDataItem(
             self._add_name(names, self.N_PACKETS_RECEIVED_NAME), n_received)
         yield ProvenanceDataItem(
             self._add_name(names, self.N_PACKETS_PROCESSED_NAME),
-            n_processed, report=(n_received != n_processed),
-            message=self._MISMATCH_PROCESSED_FROM_RECEIVED_MESSAGE.format(
-                label, x, y, p, n_processed, n_received))
+            n_processed, (n_received != n_processed),
+            f"The delay extension {label} only processed {n_processed} of "
+            f"{n_received} received packets. This could indicate a fault.")
         yield ProvenanceDataItem(
             self._add_name(names, self.MISMATCH_ADDED_FROM_PROCESSED_NAME),
-            n_added, report=(n_added != n_processed),
-            message=self._MISMATCH_ADDED_FROM_PROCESSED_MESSAGE.format(
-                label, x, y, p, n_added, n_processed))
+            n_added, (n_added != n_processed),
+            f"The delay extension {label} only added {n_added} of "
+            f"{n_processed} processed packets. This could indicate a "
+            "routing or filtering fault")
         yield ProvenanceDataItem(
             self._add_name(names, self.N_PACKETS_SENT_NAME), n_sent)
         yield ProvenanceDataItem(
             self._add_name(names, self.INPUT_BUFFER_LOST_NAME),
-            n_overflows, report=(n_overflows > 0),
-            message=self._INPUT_BUFFER_LOST_MESSAGE.format(
-                label, x, y, p, n_overflows))
+            n_overflows, (n_overflows > 0),
+            f"The input buffer for {label} lost packets on {n_overflows} "
+            "occasions. This is often a sign that the system is running "
+            "too quickly for the number of neurons per core. Please "
+            "increase the timer_tic or time_scale_factor or decrease the "
+            "number of neurons per core.")
         yield ProvenanceDataItem(
             self._add_name(names, self.DELAYED_FOR_TRAFFIC_NAME), n_delays)
         yield self._app_vertex.get_tdma_provenance_item(
-            names, x, y, p, n_tdma_behind)
+            names, label, n_tdma_behind)

         late_message = (
-            self.N_LATE_SPIKES_MESSAGE_DROP
-            if self._app_vertex.drop_late_spikes
-            else self.N_LATE_SPIKES_MESSAGE_NO_DROP)
+            f"On {label}, {n_late_spikes} packets were dropped from the "
+            "input buffer, because they arrived too late to be processed in "
+            "a given time step. Try increasing the time_scale_factor located "
+            "within the .spynnaker.cfg file or in the pynn.setup() method."
+            if self._app_vertex.drop_late_spikes else
+            f"On {label}, {n_late_spikes} packets arrived too late to be "
+            "processed in a given time step. Try increasing the "
+            "time_scale_factor located within the .spynnaker.cfg file or in "
+            "the pynn.setup() method.")
         yield ProvenanceDataItem(
             self._add_name(names, self.N_LATE_SPIKES_NAME),
             n_late_spikes, report=(n_late_spikes > 0),
-            message=late_message.format(n_late_spikes, label, x, y, p))
+            message=late_message)
         yield ProvenanceDataItem(
             self._add_name(names, self.BACKGROUND_MAX_QUEUED_NAME),
-            max_bg, report=(max_bg > 1),
-            message=self._BACKGROUND_MAX_QUEUED_MESSAGE.format(
-                max_bg, label, x, y, p))
+            max_bg, (max_bg > 1),
+            f"On {label}, a maximum of {max_bg} background tasks were queued. "
+            "Try increasing the time_scale_factor located within the "
+            ".spynnaker.cfg file or in the pynn.setup() method.")
         yield ProvenanceDataItem(
             self._add_name(names, self.BACKGROUND_OVERLOADS_NAME),
-            n_bg_overloads, report=(n_bg_overloads > 0),
-            message=self._BACKGROUND_OVERLOADS_MESSAGE.format(
-                label, x, y, p, n_bg_overloads))
+            n_bg_overloads, (n_bg_overloads > 0),
+            f"On {label}, the background queue overloaded {n_bg_overloads} "
+            "times. Try increasing the time_scale_factor located within the "
+            ".spynnaker.cfg file or in the pynn.setup() method.")

     @overrides(MachineVertex.get_n_keys_for_partition)
     def get_n_keys_for_partition(self, _partition):

From ba3074f603af69df1a2c4c857477e6cacd0ac5e2 Mon Sep 17 00:00:00 2001
From: Donal Fellows
Date: Fri, 30 Apr 2021 11:30:27 +0100
Subject: [PATCH 5/8] Better wording/phrasing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename the methods that convert arrays of provenance data into
provenance items to “parse_…” for clarity. Also use more public names,
and audit what is actually called so that uncalled methods are removed.
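
For reviewers, here is a minimal sketch of the overriding pattern this
commit converges on. The hook name, argument order and item
construction follow the diffs below; the import paths and the
ExampleVertex scaffolding are assumptions (a real machine vertex also
inherits MachineVertex and implements the other abstract members), so
treat it as an illustration rather than working library code:

    from spinn_utilities.overrides import overrides
    from spinn_front_end_common.interface.provenance import (
        ProvidesProvenanceDataFromMachineImpl)
    from spinn_front_end_common.utilities.utility_objs import (
        ProvenanceDataItem)

    class ExampleVertex(ProvidesProvenanceDataFromMachineImpl):
        # Name of the single extra provenance item this vertex records
        OVERFLOW_NAME = "Times_the_input_buffer_lost_packets"

        @overrides(ProvidesProvenanceDataFromMachineImpl.
                   parse_extra_provenance_items)
        def parse_extra_provenance_items(self, label, names, provenance_data):
            # provenance_data holds only this vertex's extra words; the
            # base class reads the region and parses the system items
            n_overflows, = provenance_data
            yield ProvenanceDataItem(
                names + [self.OVERFLOW_NAME], n_overflows,
                report=(n_overflows > 0),
                message=f"The input buffer for {label} lost packets on "
                        f"{n_overflows} occasions.")

PopulationMachineVertex remains the deliberate exception: it needs to
see the system items too (to spot timer-tick overruns), so it keeps its
own override of get_provenance_data_from_machine and dispatches by
hand, as the diff below shows.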
---
 .../machine_munich_motor_device.py            |   4 +-
 .../neuron/population_machine_vertex.py       | 113 +++++++++---------
 .../spike_source_poisson_machine_vertex.py    |   4 +-
 .../delays/delay_extension_machine_vertex.py  |  28 ++---
 4 files changed, 75 insertions(+), 74 deletions(-)

diff --git a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py
index 7244db2962..57fec58d94 100644
--- a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py
+++ b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py
@@ -122,8 +122,8 @@ def _n_additional_data_items(self):
         return self._PROVENANCE_ELEMENTS

     @overrides(
-        ProvidesProvenanceDataFromMachineImpl._get_extra_provenance_items)
-    def _get_extra_provenance_items(
+        ProvidesProvenanceDataFromMachineImpl.parse_extra_provenance_items)
+    def parse_extra_provenance_items(
             self, label, names, provenance_data):
         n_buffer_overflows, = provenance_data

diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py
index d5c0d2063e..fd1fe48d49 100644
--- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py
+++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py
@@ -95,18 +95,6 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum):
     # the number of times the background queue overloaded
     N_BACKGROUND_OVERLOADS = 16

-    SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated"
-    INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets"
-    TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events"
-    LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to"
-    N_RE_WIRES_NAME = "Number_of_rewires"
-    SATURATED_PLASTIC_WEIGHTS_NAME = (
-        "Times_plastic_synaptic_weights_have_saturated")
-    _N_LATE_SPIKES_NAME = "Number_of_late_spikes"
-    _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer"
-    _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded"
-    _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued"
-
     _PROFILE_TAG_LABELS = {
         0: "TIMER",
         1: "DMA_READ",
@@ -118,6 +106,17 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum):
     _WORDS_TO_COVER_256_ATOMS = 8

     # provenance data items
+    SATURATION_COUNT_NAME = "Times_synaptic_weights_have_saturated"
+    INPUT_BUFFER_FULL_NAME = "Times_the_input_buffer_lost_packets"
+    TOTAL_PRE_SYNAPTIC_EVENT_NAME = "Total_pre_synaptic_events"
+    LAST_TIMER_TICK_NAME = "Last_timer_tic_the_core_ran_to"
+    N_RE_WIRES_NAME = "Number_of_rewires"
+    SATURATED_PLASTIC_WEIGHTS_NAME = (
+        "Times_plastic_synaptic_weights_have_saturated")
+    _N_LATE_SPIKES_NAME = "Number_of_late_spikes"
+    _MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer"
+    _BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded"
+    _BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued"
     BIT_FIELD_FILTERED_PACKETS = (
         "How many packets were filtered by the bitfield filterer.")
     INVALID_MASTER_POP_HITS = "Invalid Master Pop hits"
@@ -208,46 +207,48 @@ def _n_additional_data_items(self):
                get_provenance_data_from_machine)
     def get_provenance_data_from_machine(self, transceiver, placement):
         provenance_data = self._read_provenance_data(transceiver, placement)
-        provenance_items = list(self._read_basic_provenance_items(
-            provenance_data, placement))
+        label, names = self._get_provenance_placement_description(placement)

         # This is why we have to override the superclass public method
         tic_overruns = 0
-        for item in provenance_items:
+        for item in self.parse_system_provenance_items(
+                label, names, provenance_data):
+            yield item
             if item.names[-1] == self._TIMER_TICK_OVERRUN:
+                # GOTCHA! This value is also needed by __parse_prov_items
                 tic_overruns = item.value

         # translate into provenance data items
-        provenance_items.extend(self.__local_prov_items(
-            placement, provenance_data, tic_overruns))
-        return provenance_items
+        yield from self.__parse_prov_items(
+            label, names, self._get_extra_provenance_words(provenance_data),
+            tic_overruns)

-    def __local_prov_items(self, placement, provenance_data, tic_overruns):
+    def __parse_prov_items(self, label, names, provenance_data, tic_overruns):
+        # Would be parse_extra_provenance_items except for extra argument
         """
-        :param ~.Placement placement:
+        :param str label:
+        :param list(str) names:
         :param list(int) provenance_data:
         :param int tic_overruns:
         :rtype: iterable(ProvenanceDataItem)
         """
-        label, names = self._get_placement_details(placement)
         (n_pre_synaptic_events, n_saturations, n_buffer_overflows,
          last_timer_tick, n_plastic_saturations, n_ghost_searches,
-         failed_to_read_bit_fields, dma_completes, spike_processing_count,
+         n_bitfield_fails, dma_completes, spike_processing_count,
          invalid_master_pop_hits, n_packets_filtered, n_rewires,
-         n_late_packets, input_buffer_max_filled_size, tdma_misses,
-         max_background_queued, n_background_overloads) = \
-            self._get_remaining_provenance_data_items(provenance_data)
+         n_late_packets, input_buffer_max, tdma_misses, max_bg_queued,
+         n_bg_overloads) = provenance_data

         # translate into provenance data items
         yield ProvenanceDataItem(
-            self._add_name(names, self.SATURATION_COUNT_NAME),
+            names + [self.SATURATION_COUNT_NAME],
             n_saturations, (n_saturations > 0),
             f"The weights from the synapses for {label} saturated "
             f"{n_saturations} times. If this causes issues you can increase "
             "the spikes_per_second and / or ring_buffer_sigma values located "
             "within the .spynnaker.cfg file.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.INPUT_BUFFER_FULL_NAME),
+            names + [self.INPUT_BUFFER_FULL_NAME],
             n_buffer_overflows, (n_buffer_overflows > 0),
             f"The input buffer for {label} lost packets on "
             f"{n_buffer_overflows} occasions. This is often a sign that the "
@@ -255,49 +256,49 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns):
             "core. Please increase the timer_tic or time_scale_factor or "
             "decrease the number of neurons per core.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.TOTAL_PRE_SYNAPTIC_EVENT_NAME),
+            names + [self.TOTAL_PRE_SYNAPTIC_EVENT_NAME],
             n_pre_synaptic_events)
         yield ProvenanceDataItem(
-            self._add_name(names, self.LAST_TIMER_TICK_NAME), last_timer_tick)
+            names + [self.LAST_TIMER_TICK_NAME], last_timer_tick)
         yield ProvenanceDataItem(
-            self._add_name(names, self.SATURATED_PLASTIC_WEIGHTS_NAME),
+            names + [self.SATURATED_PLASTIC_WEIGHTS_NAME],
             n_plastic_saturations, (n_plastic_saturations > 0),
             f"The weights from the plastic synapses for {label} saturated "
             f"{n_plastic_saturations} times. If this causes issue increase "
             "the spikes_per_second and / or ring_buffer_sigma values located "
             "within the .spynnaker.cfg file.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.N_RE_WIRES_NAME), n_rewires)
+            names + [self.N_RE_WIRES_NAME], n_rewires)
         yield ProvenanceDataItem(
-            self._add_name(names, self.GHOST_SEARCHES), n_ghost_searches,
+            names + [self.GHOST_SEARCHES], n_ghost_searches,
             (n_ghost_searches > 0),
             f"The number of failed population table searches for {label} was "
             f"{n_ghost_searches}. If this number is large relative to the "
             "predicted incoming spike rate, try increasing source and target "
             "neurons per core")
         yield ProvenanceDataItem(
-            self._add_name(names, self.BIT_FIELDS_NOT_READ),
-            failed_to_read_bit_fields, False,
+            names + [self.BIT_FIELDS_NOT_READ],
+            n_bitfield_fails, False,
             f"On {label}, the filter for stopping redundant DMAs couldn't be "
-            f"fully filled in; it failed to read {failed_to_read_bit_fields} "
-            "entries, which means it required a max of "
-            f"{failed_to_read_bit_fields * self._WORDS_TO_COVER_256_ATOMS} "
+            f"fully filled in; it failed to read {n_bitfield_fails} entries, "
+            "which means it required a max of "
+            f"{n_bitfield_fails * self._WORDS_TO_COVER_256_ATOMS} "
             "extra bytes of DTCM (assuming cores have at most 255 neurons). "
             "Try reducing neurons per core, or size of buffers, or neuron "
             "params per neuron, etc.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.DMA_COMPLETE), dma_completes)
+            names + [self.DMA_COMPLETE], dma_completes)
         yield ProvenanceDataItem(
-            self._add_name(names, self.SPIKES_PROCESSED),
+            names + [self.SPIKES_PROCESSED],
             spike_processing_count)
         yield ProvenanceDataItem(
-            self._add_name(names, self.INVALID_MASTER_POP_HITS),
+            names + [self.INVALID_MASTER_POP_HITS],
             invalid_master_pop_hits, (invalid_master_pop_hits > 0),
             f"On {label}, there were {invalid_master_pop_hits} keys received "
             "that had no master pop entry for them. This is an error, which "
             "most likely stems from bad routing.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.BIT_FIELD_FILTERED_PACKETS),
+            names + [self.BIT_FIELD_FILTERED_PACKETS],
             n_packets_filtered, (n_packets_filtered > 0 and (
                 n_buffer_overflows > 0 or tic_overruns > 0)),
             f"On {label}, there were {n_packets_filtered} packets received "
@@ -318,30 +319,30 @@ def __local_prov_items(self, placement, provenance_data, tic_overruns):
             "time_scale_factor located within the .spynnaker.cfg file or in "
             "the pynn.setup() method.")
         yield ProvenanceDataItem(
-            self._add_name(names, self._N_LATE_SPIKES_NAME),
+            names + [self._N_LATE_SPIKES_NAME],
             n_late_packets, (n_late_packets > 0), late_message)
         yield ProvenanceDataItem(
-            self._add_name(names, self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME),
-            input_buffer_max_filled_size, report=False)
+            names + [self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME],
+            input_buffer_max, report=False)
         yield self._app_vertex.get_tdma_provenance_item(
             names, label, tdma_misses)
         yield ProvenanceDataItem(
-            self._add_name(names, self._BACKGROUND_MAX_QUEUED_NAME),
-            max_background_queued, (max_background_queued > 1),
-            f"On {label}, a maximum of {max_background_queued} background "
-            "tasks were queued, which can indicate a core overloading. Try "
-            "increasing the time_scale_factor located within the "
-            ".spynnaker.cfg file or in the pynn.setup() method.")
+            names + [self._BACKGROUND_MAX_QUEUED_NAME],
+            max_bg_queued, (max_bg_queued > 1),
+            f"On {label}, a maximum of {max_bg_queued} background tasks were "
+            "queued, which can indicate a core overloading. Try increasing "
+            "the time_scale_factor located within the .spynnaker.cfg file or "
+            "in the pynn.setup() method.")
         yield ProvenanceDataItem(
-            self._add_name(names, self._BACKGROUND_OVERLOADS_NAME),
-            n_background_overloads, (n_background_overloads > 0),
-            f"On {label}, the background queue overloaded "
-            f"{n_background_overloads} times, which can indicate a core "
-            "overloading. Try increasing the time_scale_factor located "
-            "within the .spynnaker.cfg file or in the pynn.setup() method.")
+            names + [self._BACKGROUND_OVERLOADS_NAME],
+            n_bg_overloads, (n_bg_overloads > 0),
+            f"On {label}, the background queue overloaded {n_bg_overloads} "
+            "times, which can indicate a core overloading. Try increasing "
+            "the time_scale_factor located within the .spynnaker.cfg file or "
+            "in the pynn.setup() method.")

     @overrides(AbstractReceiveBuffersToHost.get_recorded_region_ids)
     def get_recorded_region_ids(self):
diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
index b179b86a6e..89f195348c 100644
--- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
+++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py
@@ -209,8 +209,8 @@ def get_profile_data(self, transceiver, placement):
         self.PROFILE_TAG_LABELS, transceiver, placement)

     @overrides(ProvidesProvenanceDataFromMachineImpl.
-               _get_extra_provenance_items)
-    def _get_extra_provenance_items(self, label, names, provenance_data):
+               parse_extra_provenance_items)
+    def parse_extra_provenance_items(self, label, names, provenance_data):
         n_times_tdma_fell_behind = provenance_data[
             self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSED_SLOTS.value]

diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py
index 3dd2b2d9c7..c1a4d9c299 100644
--- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py
+++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py
@@ -123,15 +123,15 @@ def resources_required(self):
         return self.__resources

     @overrides(ProvidesProvenanceDataFromMachineImpl.
-               _get_extra_provenance_items)
-    def _get_extra_provenance_items(self, label, names, provenance_data):
+               parse_extra_provenance_items)
+    def parse_extra_provenance_items(self, label, names, provenance_data):
         (n_received, n_processed, n_added, n_sent, n_overflows, n_delays,
          n_tdma_behind, n_sat, n_bad_neuron, n_bad_keys, n_late_spikes,
          max_bg, n_bg_overloads) = provenance_data

         # translate into provenance data items
         yield ProvenanceDataItem(
-            self._add_name(names, self.COUNT_SATURATION_NAME),
+            names + [self.COUNT_SATURATION_NAME],
             n_sat, (n_sat != 0),
             f"The delay extension {label} has dropped {n_sat} packets because "
             "during certain time steps a neuron was asked to spike more than "
@@ -139,34 +139,34 @@ def _get_extra_provenance_items(self, label, names, provenance_data):
             "is a uint8. Reduce the packet rates, or modify the delay "
             "extension to have larger counters.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.INVALID_NEURON_ID_COUNT_NAME),
+            names + [self.INVALID_NEURON_ID_COUNT_NAME],
             n_bad_neuron, (n_bad_neuron != 0),
             f"The delay extension {label} has dropped {n_bad_neuron} packets "
             "because their neuron id was not valid. This is likely a routing "
             "issue. Please fix and try again")
         yield ProvenanceDataItem(
-            self._add_name(names, self.INVALID_KEY_COUNT_NAME),
+            names + [self.INVALID_KEY_COUNT_NAME],
             n_bad_keys, (n_bad_keys != 0),
             f"The delay extension {label} has dropped {n_bad_keys} packets "
             "due to the packet key being invalid. This is likely a routing "
             "issue. Please fix and try again")
         yield ProvenanceDataItem(
-            self._add_name(names, self.N_PACKETS_RECEIVED_NAME), n_received)
+            names + [self.N_PACKETS_RECEIVED_NAME], n_received)
         yield ProvenanceDataItem(
-            self._add_name(names, self.N_PACKETS_PROCESSED_NAME),
+            names + [self.N_PACKETS_PROCESSED_NAME],
             n_processed, (n_received != n_processed),
             f"The delay extension {label} only processed {n_processed} of "
             f"{n_received} received packets. This could indicate a fault.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.MISMATCH_ADDED_FROM_PROCESSED_NAME),
+            names + [self.MISMATCH_ADDED_FROM_PROCESSED_NAME],
             n_added, (n_added != n_processed),
             f"The delay extension {label} only added {n_added} of "
             f"{n_processed} processed packets. This could indicate a "
             "routing or filtering fault")
         yield ProvenanceDataItem(
-            self._add_name(names, self.N_PACKETS_SENT_NAME), n_sent)
+            names + [self.N_PACKETS_SENT_NAME], n_sent)
         yield ProvenanceDataItem(
-            self._add_name(names, self.INPUT_BUFFER_LOST_NAME),
+            names + [self.INPUT_BUFFER_LOST_NAME],
             n_overflows, (n_overflows > 0),
             f"The input buffer for {label} lost packets on {n_overflows} "
             "occasions. This is often a sign that the system is running "
@@ -174,7 +174,7 @@ def _get_extra_provenance_items(self, label, names, provenance_data):
             "increase the timer_tic or time_scale_factor or decrease the "
             "number of neurons per core.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.DELAYED_FOR_TRAFFIC_NAME), n_delays)
+            names + [self.DELAYED_FOR_TRAFFIC_NAME], n_delays)
         yield self._app_vertex.get_tdma_provenance_item(
             names, label, n_tdma_behind)

@@ -189,18 +189,18 @@ def _get_extra_provenance_items(self, label, names, provenance_data):
             "time_scale_factor located within the .spynnaker.cfg file or in "
             "the pynn.setup() method.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.N_LATE_SPIKES_NAME),
+            names + [self.N_LATE_SPIKES_NAME],
             n_late_spikes, report=(n_late_spikes > 0),
             message=late_message)
         yield ProvenanceDataItem(
-            self._add_name(names, self.BACKGROUND_MAX_QUEUED_NAME),
+            names + [self.BACKGROUND_MAX_QUEUED_NAME],
             max_bg, (max_bg > 1),
             f"On {label}, a maximum of {max_bg} background tasks were queued. "
             "Try increasing the time_scale_factor located within the "
             ".spynnaker.cfg file or in the pynn.setup() method.")
         yield ProvenanceDataItem(
-            self._add_name(names, self.BACKGROUND_OVERLOADS_NAME),
+            names + [self.BACKGROUND_OVERLOADS_NAME],
             n_bg_overloads, (n_bg_overloads > 0),
             f"On {label}, the background queue overloaded {n_bg_overloads} "
             "times. Try increasing the time_scale_factor located within the "

From 1ae657575160462f3da749c61228238cb79bddff Mon Sep 17 00:00:00 2001
From: "Christian Y. Brenninkmeijer"
Date: Tue, 4 May 2021 10:24:30 +0100
Subject: [PATCH 6/8] removed dead cfg option
---
 unittests/test_sata_connectors/spynnaker.cfg | 1 -
 1 file changed, 1 deletion(-)

diff --git a/unittests/test_sata_connectors/spynnaker.cfg b/unittests/test_sata_connectors/spynnaker.cfg
index 656a5db749..6d233d1f7e 100644
--- a/unittests/test_sata_connectors/spynnaker.cfg
+++ b/unittests/test_sata_connectors/spynnaker.cfg
@@ -16,6 +16,5 @@ requires_wrap_arounds = None
 down_cores = None
 down_chips = None
 down_links = 2,5,2:2,5,3:6,1,2:6,1,3:10,9,2:10,9,3
-core_limit = None
 width = 12
 height = 12

From b96d9bdfdc9c819b1856311d04e5c67f3b86ac30 Mon Sep 17 00:00:00 2001
From: "Christian Y. Brenninkmeijer"
Date: Tue, 4 May 2021 10:27:35 +0100
Subject: [PATCH 7/8] removed dead cfg option
---
 unittests/test_sata_connectors/spynnaker.cfg | 2 --
 1 file changed, 2 deletions(-)

diff --git a/unittests/test_sata_connectors/spynnaker.cfg b/unittests/test_sata_connectors/spynnaker.cfg
index 6d233d1f7e..d4d16052ba 100644
--- a/unittests/test_sata_connectors/spynnaker.cfg
+++ b/unittests/test_sata_connectors/spynnaker.cfg
@@ -11,8 +11,6 @@
 spalloc_machine = None
 virtual_board = True

-requires_wrap_arounds = None
-
 down_cores = None
 down_chips = None
 down_links = 2,5,2:2,5,3:6,1,2:6,1,3:10,9,2:10,9,3

From 3d66f869d167255f0013790051d58ecdc1aec15b Mon Sep 17 00:00:00 2001
From: "Christian Y. Brenninkmeijer"
Date: Tue, 4 May 2021 16:27:34 +0100
Subject: [PATCH 8/8] read_router_compressor_with_bitfield_iobuf no longer used.
---
 .../test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg | 1 -
 1 file changed, 1 deletion(-)

diff --git a/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg b/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg
index e7d0832df3..f592e58cb5 100644
--- a/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg
+++ b/spynnaker_integration_tests/test_iobuf/test_iobuf_without_during_run_flag/spynnaker.cfg
@@ -6,5 +6,4 @@ extract_iobuf_from_cores = ALL
 extract_iobuf_from_binary_types = None

 generate_bit_field_summary_report = False
-read_router_compressor_with_bitfield_iobuf = False
 generate_router_compression_with_bitfield_report = False