Create graph #961

Merged · 18 commits · Jul 28, 2022
6 changes: 6 additions & 0 deletions spinn_front_end_common/data/fec_data_writer.py
@@ -548,3 +548,9 @@ def clear_notification_protocol(self):
If the close causes an Exception it is logged and ignored
"""
self.__fec_data._clear_notification_protocol()

@classmethod
@overrides(FecDataView.add_vertex)
def add_vertex(cls, vertex):
# Avoid the safety check in FecDataView
PacmanDataWriter.add_vertex(vertex)
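
The override above lets internal code register a vertex without tripping the guard in `FecDataView.add_vertex`. A self-contained sketch of the bypass pattern, using toy classes rather than the real PACMAN/FEC hierarchy (the guard shown is an assumption for illustration):

```python
class BaseWriter:
    """Stands in for PacmanDataWriter: actually stores the vertex."""
    _vertices = []

    @classmethod
    def add_vertex(cls, vertex):
        cls._vertices.append(vertex)


class View(BaseWriter):
    """Stands in for FecDataView: guards against unsafe additions."""
    @classmethod
    def add_vertex(cls, vertex):
        raise RuntimeError("graph may not be modified right now")


class Writer(View):
    """Stands in for FecDataWriter: trusted code skips the guard."""
    @classmethod
    def add_vertex(cls, vertex):
        # Call the grandparent directly, as the diff calls
        # PacmanDataWriter.add_vertex(vertex), bypassing View's check.
        BaseWriter.add_vertex(vertex)


Writer.add_vertex("lpg_vertex")    # succeeds
# View.add_vertex("lpg_vertex")   # would raise RuntimeError
```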
65 changes: 20 additions & 45 deletions spinn_front_end_common/interface/abstract_spinnaker_base.py
@@ -117,7 +117,7 @@
from spinn_front_end_common.utilities.iobuf_extractor import IOBufExtractor
from spinn_front_end_common.utilities.utility_objs import ExecutableType
from spinn_front_end_common.utility_models import (
CommandSender, DataSpeedUpPacketGatherMachineVertex, LivePacketGather)
CommandSender, DataSpeedUpPacketGatherMachineVertex)
from spinn_front_end_common.utilities.report_functions.reports import (
generate_comparison_router_report, partitioner_report,
placer_reports_with_application_graph,
@@ -178,10 +178,8 @@ class AbstractSpinnakerBase(ConfigHandler):
"_multicast_routes_loaded"
]

def __init__(
self, graph_label=None, data_writer_cls=None):
def __init__(self, data_writer_cls=None):
"""
:param str graph_label: A label for the overall application graph
:param int n_chips_required:
Overrides the number of chips to allocate from spalloc
:param int n_boards_required:
@@ -200,7 +198,6 @@ def __init__(
# store for Live Packet Gatherers
self._lpg_vertices = dict()

self._data_writer.create_graphs(graph_label)
self._machine_allocation_controller = None
self._hard_reset()

@@ -244,31 +241,6 @@ def _hard_reset(self):
def _machine_clear(self):
pass

def add_live_packet_gatherer_parameters(
self, live_packet_gatherer_params, vertex_to_record_from,
partition_ids):
""" Adds parameters for a new LPG if needed, or adds to the tracker \
for parameters. Note that LPGs can be inserted to track behaviour \
either at the application graph level or at the machine graph \
level, but not both at the same time.

:param LivePacketGatherParameters live_packet_gatherer_params:
params to look for a LPG
:param ~pacman.model.graphs.AbstractVertex vertex_to_record_from:
the vertex that needs to send to a given LPG
:param list(str) partition_ids:
the IDs of the partitions to connect from the vertex
"""
lpg_vertex = self._lpg_vertices.get(live_packet_gatherer_params)
if lpg_vertex is None:
lpg_vertex = LivePacketGather(
live_packet_gatherer_params, live_packet_gatherer_params.label)
self._lpg_vertices[live_packet_gatherer_params] = lpg_vertex
self._data_writer.add_vertex(lpg_vertex)
for part_id in partition_ids:
self._data_writer.add_edge(
ApplicationEdge(vertex_to_record_from, lpg_vertex), part_id)
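
The removed method keyed its `_lpg_vertices` dict on the parameters object so that all requests with equal parameters share one gatherer. A minimal sketch of that find-or-create memoization, assuming `LivePacketGatherParameters` is hashable and compares by value (toy stand-ins below):

```python
lpg_vertices = {}

def get_or_create_lpg(params):
    # One gatherer per distinct parameter set; params is the dict key,
    # so it must be hashable and compare by value.
    vertex = lpg_vertices.get(params)
    if vertex is None:
        vertex = ("LivePacketGather", params)   # stand-in for the vertex
        lpg_vertices[params] = vertex
    return vertex

assert get_or_create_lpg("params_a") is get_or_create_lpg("params_a")
assert get_or_create_lpg("params_a") is not get_or_create_lpg("params_b")
```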

def check_machine_specifics(self):
""" Checks machine specifics for the different modes of execution.

@@ -496,7 +468,6 @@ def __run(self, run_time, sync_time):
self._hard_reset()
FecTimer.setup(self)

self._data_writer.clone_graphs()
self._add_dependent_verts_and_edges_for_application_graph()
self._add_commands_to_command_sender()

@@ -600,15 +571,15 @@ def _is_per_timestep_sdram(self):

def _add_commands_to_command_sender(self):
command_sender = None
graph = self._data_writer.get_runtime_graph()
vertices = graph.vertices
for vertex in vertices:
for vertex in self._data_writer.iterate_vertices():
if isinstance(vertex, CommandSender):
command_sender = vertex
for vertex in self._data_writer.iterate_vertices():
if isinstance(vertex, AbstractSendMeMulticastCommandsVertex):
# if there's no command sender yet, build one
if command_sender is None:
command_sender = CommandSender(
"auto_added_command_sender", None)
graph.add_vertex(command_sender)

# allow the command sender to create key to partition map
command_sender.add_commands(
@@ -618,24 +589,28 @@ def _add_commands_to_command_sender(self):

# add the edges from the command sender to the dependent vertices
if command_sender is not None:
if not command_sender.addedToGraph():
self._data_writer.add_vertex(command_sender)
edges, partition_ids = command_sender.edges_and_partitions()
for edge, partition_id in zip(edges, partition_ids):
graph.add_edge(edge, partition_id)
self._data_writer.add_edge(edge, partition_id)

def _add_dependent_verts_and_edges_for_application_graph(self):
graph = self._data_writer.get_runtime_graph()
for vertex in graph.vertices:
# add any dependent edges and vertices if needed
if isinstance(vertex, AbstractVertexWithEdgeToDependentVertices):
for dependant_vertex in vertex.dependent_vertices():
graph.add_vertex(dependant_vertex)
# cache vertices to allow insertion during iteration
vertices = list(self._data_writer.get_vertices_by_type(
AbstractVertexWithEdgeToDependentVertices))
for vertex in vertices:
for dependant_vertex in vertex.dependent_vertices():
if not dependant_vertex.addedToGraph():
self._data_writer.add_vertex(dependant_vertex)
edge_partition_ids = vertex.\
edge_partition_identifiers_for_dependent_vertex(
dependant_vertex)
for edge_identifier in edge_partition_ids:
dependant_edge = ApplicationEdge(
pre_vertex=vertex, post_vertex=dependant_vertex)
graph.add_edge(dependant_edge, edge_identifier)
self._data_writer.add_edge(
dependant_edge, edge_identifier)
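
Note the `list(...)` snapshot above: both this method and `_add_commands_to_command_sender` add vertices while walking the vertex collection, and iterating a live view during insertion would fail. A minimal sketch of the failure mode, assuming the underlying store is dict-backed:

```python
store = {"existing_vertex": object()}

def iterate_live():
    # Stands in for an iterator that yields straight off the store.
    yield from store

try:
    for name in iterate_live():
        store["dependent_vertex"] = object()   # insertion mid-iteration
except RuntimeError as err:
    print(err)    # "dictionary changed size during iteration"

# Snapshotting first, as the code above does, makes insertion safe:
for name in list(store):
    store[name + "_dep"] = object()
```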

def _deduce_data_n_timesteps(self):
""" Operates the auto pause and resume functionality by figuring out\
@@ -880,7 +855,7 @@ def _execute_splitter_partitioner(self):
"""
Runs, times and logs the SplitterPartitioner if required
"""
if not self._data_writer.get_runtime_graph().n_vertices:
if self._data_writer.get_n_vertices() == 0:
return
with FecTimer(MAPPING, "Splitter partitioner"):
self._data_writer.set_n_chips_in_graph(splitter_partitioner())
@@ -998,7 +973,7 @@ def _report_placements_with_application_graph(self):
Writes, times and logs the application graph placer report if
requested
"""
if not self._data_writer.get_runtime_graph().n_vertices:
if self._data_writer.get_n_vertices() == 0:
return
with FecTimer(
MAPPING, "Placements wth application graph report") as timer:
@@ -50,7 +50,7 @@ def _count_regions():
# Count the regions to be read
n_regions_to_read = 0
recording_placements = list()
for app_vertex in FecDataView.get_runtime_graph().vertices:
for app_vertex in FecDataView.iterate_vertices():
for vertex in app_vertex.machine_vertices:
if isinstance(vertex, AbstractReceiveBuffersToHost):
n_regions_to_read += len(vertex.get_recorded_region_ids())
@@ -70,14 +70,12 @@ def _write_to_db(
w.add_tags()
p.update()
lpg_source_machine_vertices = w.add_lpg_mapping()
app_graph = FecDataView.get_runtime_graph()

if get_config_bool(
"Database", "create_routing_info_to_neuron_id_mapping"):
machine_vertices = [
(vertex, vertex.injection_partition_id)
for app_vertex in app_graph.vertices
for vertex in app_vertex.machine_vertices
for vertex in FecDataView.iterate_machine_vertices()
if isinstance(vertex, AbstractSupportsDatabaseInjection)
and vertex.is_in_injection_mode]
machine_vertices.extend(lpg_source_machine_vertices)
@@ -23,19 +23,18 @@ def graph_provenance_gatherer():
""" Gets provenance information from the graph.

"""
application_graph = FecDataView.get_runtime_graph()
progress = ProgressBar(
application_graph.n_vertices +
application_graph.n_outgoing_edge_partitions,
FecDataView.get_n_vertices() +
FecDataView.get_n_partitions(),
"Getting provenance data from application graph")
for vertex in progress.over(application_graph.vertices, False):
for vertex in progress.over(FecDataView.iterate_vertices(), False):
if isinstance(vertex, AbstractProvidesLocalProvenanceData):
vertex.get_local_provenance_data()
for m_vertex in vertex.machine_vertices:
if isinstance(m_vertex, AbstractProvidesLocalProvenanceData):
m_vertex.get_local_provenance_data()
for partition in progress.over(
application_graph.outgoing_edge_partitions):
FecDataView.iterate_partitions()):
for edge in partition.edges:
if isinstance(edge, AbstractProvidesLocalProvenanceData):
edge.get_local_provenance_data()
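
Since `iterate_vertices()` and `iterate_partitions()` return iterators with no `len()`, the progress total is computed up front from `get_n_vertices()` and `get_n_partitions()`. A hedged usage sketch of that two-phase pattern, assuming the `spinn_utilities` `ProgressBar(total, label)` constructor and `over(iterable, finish_at_end=True)` signature:

```python
from spinn_utilities.progress_bar import ProgressBar

first_phase = ["a", "b", "c"]
second_phase = ["d", "e"]

# Iterators carry no length, so the combined total is supplied here.
progress = ProgressBar(
    len(first_phase) + len(second_phase), "two-phase walk")
for item in progress.over(iter(first_phase), False):   # keep the bar open
    pass
for item in progress.over(iter(second_phase)):         # closes the bar
    pass
```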
@@ -52,7 +52,6 @@ def host_based_bit_field_router_compressor():
:rtype: ~pacman.model.routing_tables.MulticastRoutingTables
"""
routing_tables = FecDataView.get_uncompressed().routing_tables
app_graph = FecDataView.get_runtime_graph()
# create progress bar
progress = ProgressBar(
len(routing_tables) * 2,
@@ -71,7 +70,7 @@
key_atom_map = generate_key_to_atom_map()

most_costly_cores = defaultdict(lambda: defaultdict(int))
for partition in app_graph.outgoing_edge_partitions:
for partition in FecDataView.iterate_partitions():
for edge in partition.edges:
splitter = edge.post_vertex.splitter
for vertex, _ in splitter.get_source_specific_in_coming_vertices(
@@ -107,10 +106,9 @@ def generate_key_to_atom_map():
:rtype: dict(int,int)
"""
# build key to n atoms map
app_graph = FecDataView.get_runtime_graph()
routing_infos = FecDataView.get_routing_infos()
key_to_n_atoms_map = dict()
for partition in app_graph.outgoing_edge_partitions:
for partition in FecDataView.iterate_partitions():
for vertex in partition.pre_vertex.splitter.get_out_going_vertices(
partition.identifier):
key = routing_infos.get_first_key_from_pre_vertex(
@@ -86,7 +86,7 @@ def local_tdma_builder():
Y is pop1 firing

"""
if FecDataView.get_runtime_graph().n_vertices == 0:
if FecDataView.get_n_vertices() == 0:
return
# get config params
us_per_cycle = FecDataView.get_hardware_time_step_us()
@@ -97,31 +97,31 @@
# calculate for each app vertex if the time needed fits
app_verts = list()
max_fraction_of_sending = 0
for app_vertex in FecDataView.get_runtime_graph().vertices:
if isinstance(app_vertex, TDMAAwareApplicationVertex):
app_verts.append(app_vertex)

# get timings

# check config params for better performance
(n_at_same_time, local_clocks) = __auto_config_times(
app_machine_quantity, clocks_between_cores,
clocks_for_sending, app_vertex, clocks_waiting)
n_phases, n_slots, clocks_between_phases = \
__generate_times(
app_vertex, n_at_same_time, local_clocks)

# store in tracker
app_vertex.set_other_timings(
local_clocks, n_slots, clocks_between_phases,
n_phases, clocks_per_cycle)

# test timings
fraction_of_sending = __get_fraction_of_sending(
n_phases, clocks_between_phases, clocks_for_sending)
if fraction_of_sending is not None:
max_fraction_of_sending = max(
max_fraction_of_sending, fraction_of_sending)
for app_vertex in FecDataView.get_vertices_by_type(
TDMAAwareApplicationVertex):
app_verts.append(app_vertex)

# get timings

# check config params for better performance
(n_at_same_time, local_clocks) = __auto_config_times(
app_machine_quantity, clocks_between_cores,
clocks_for_sending, app_vertex, clocks_waiting)
n_phases, n_slots, clocks_between_phases = \
__generate_times(
app_vertex, n_at_same_time, local_clocks)

# store in tracker
app_vertex.set_other_timings(
local_clocks, n_slots, clocks_between_phases,
n_phases, clocks_per_cycle)

# test timings
fraction_of_sending = __get_fraction_of_sending(
n_phases, clocks_between_phases, clocks_for_sending)
if fraction_of_sending is not None:
max_fraction_of_sending = max(
max_fraction_of_sending, fraction_of_sending)

time_scale_factor_needed = (
FecDataView.get_time_scale_factor() * max_fraction_of_sending)
@@ -131,12 +131,12 @@
.format(time_scale_factor_needed))

# get initial offset for each app vertex.
for app_vertex in FecDataView.get_runtime_graph().vertices:
if isinstance(app_vertex, TDMAAwareApplicationVertex):
initial_offset = __generate_initial_offset(
app_vertex, app_verts, clocks_initial,
clocks_waiting)
app_vertex.set_initial_offset(initial_offset)
for app_vertex in FecDataView.get_vertices_by_type(
TDMAAwareApplicationVertex):
initial_offset = __generate_initial_offset(
app_vertex, app_verts, clocks_initial,
clocks_waiting)
app_vertex.set_initial_offset(initial_offset)


def __auto_config_times(
@@ -133,7 +133,6 @@ def run(self, compress_as_much_as_possible=False):
"""
view = FecDataView()
app_id = FecDataView.get_app_id()
app_graph = FecDataView.get_runtime_graph()
routing_tables = FecDataView.get_uncompressed()
transceiver = FecDataView.get_transceiver()
if len(routing_tables.routing_tables) == 0:
@@ -150,7 +149,7 @@
text += " capped at {} retries".format(retry_count)
progress_bar = ProgressBar(
total_number_of_things_to_do=(
len(app_graph.vertices) +
FecDataView.get_n_vertices() +
(len(routing_tables.routing_tables) *
self.TIMES_CYCLED_ROUTING_TABLES)),
string_describing_what_being_progressed=text)
@@ -199,7 +198,7 @@
# start the host side compressions if needed
if len(on_host_chips) != 0:
most_costly_cores = defaultdict(lambda: defaultdict(int))
for partition in app_graph.outgoing_edge_partitions:
for partition in FecDataView.iterate_partitions():
for edge in partition.edges:
sttr = edge.pre_vertex.splitter
for vertex in sttr.get_source_specific_in_coming_vertices(
@@ -704,10 +703,9 @@ def _generate_addresses(self, progress_bar):
# data holders
region_addresses = defaultdict(list)
sdram_block_addresses_and_sizes = defaultdict(list)
app_graph = FecDataView.get_runtime_graph()

for app_vertex in progress_bar.over(
app_graph.vertices, finish_at_end=False):
FecDataView.iterate_vertices(), finish_at_end=False):
for m_vertex in app_vertex.machine_vertices:
if isinstance(
m_vertex, AbstractSupportsBitFieldRoutingCompression):
@@ -30,16 +30,15 @@ def sdram_outgoing_partition_allocator():
transceiver = None
virtual_usage = defaultdict(int)

app_graph = FecDataView.get_runtime_graph()
progress_bar = ProgressBar(
total_number_of_things_to_do=len(app_graph.vertices),
total_number_of_things_to_do=FecDataView.get_n_vertices(),
string_describing_what_being_progressed=(
"Allocating SDRAM for SDRAM outgoing egde partitions"))

# Keep track of SDRAM tags used
next_tag = defaultdict(lambda: SDRAM_EDGE_BASE_TAG)

for vertex in app_graph.vertices:
for vertex in FecDataView.iterate_vertices():
sdram_partitions = vertex.splitter.get_internal_sdram_partitions()
for sdram_partition in sdram_partitions:

@@ -26,6 +26,5 @@ def split_lpg_vertices(system_placements):
:param Placements system_placements:
existing placements to be added to
"""
for vertex in FecDataView.get_runtime_graph().vertices:
if isinstance(vertex, LivePacketGather):
vertex.splitter.create_vertices(system_placements)
for vertex in FecDataView.get_vertices_by_type(LivePacketGather):
vertex.splitter.create_vertices(system_placements)
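
Several hunks in this PR replace `isinstance` filtering inside loops with `get_vertices_by_type`. A sketch of how such a helper could be written as a generator filter; this is an assumption about its shape, not code from the PR:

```python
from typing import Iterable, Iterator, Type, TypeVar

T = TypeVar("T")

def get_vertices_by_type(
        vertices: Iterable[object], vertex_type: Type[T]) -> Iterator[T]:
    """Yield only the vertices that are instances of vertex_type."""
    for vertex in vertices:
        if isinstance(vertex, vertex_type):
            yield vertex

# Equivalent to the removed pattern:
#     for vertex in graph.vertices:
#         if isinstance(vertex, LivePacketGather):
#             vertex.splitter.create_vertices(system_placements)
```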
@@ -32,7 +32,7 @@ def splitter_selector():

:rtype: None
"""
for app_vertex in PacmanDataView.get_runtime_graph().vertices:
for app_vertex in PacmanDataView.iterate_vertices():
if app_vertex.splitter is None:
vertex_selector(app_vertex)
