• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

SpiNNakerManchester / SpiNNFrontEndCommon / 8906184429

01 May 2024 06:19AM UTC coverage: 47.241% (-0.06%) from 47.305%
8906184429

Pull #1181

github

Christian-B
add comment
Pull Request #1181: Compressor error

1761 of 4471 branches covered (39.39%)

Branch coverage included in aggregate %.

9 of 32 new or added lines in 4 files covered. (28.13%)

1 existing line in 1 file now uncovered.

5507 of 10914 relevant lines covered (50.46%)

0.5 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

19.02
/spinn_front_end_common/interface/abstract_spinnaker_base.py
1
# Copyright (c) 2016 The University of Manchester
2
#
3
# Licensed under the Apache License, Version 2.0 (the "License");
4
# you may not use this file except in compliance with the License.
5
# You may obtain a copy of the License at
6
#
7
#     https://www.apache.org/licenses/LICENSE-2.0
8
#
9
# Unless required by applicable law or agreed to in writing, software
10
# distributed under the License is distributed on an "AS IS" BASIS,
11
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
# See the License for the specific language governing permissions and
13
# limitations under the License.
14
"""
1✔
15
main interface for the SpiNNaker tools
16
"""
17
from __future__ import annotations
1✔
18
import logging
1✔
19
import math
1✔
20
import os
1✔
21
import re
1✔
22
import signal
1✔
23
import sys
1✔
24
import threading
1✔
25
import types
1✔
26
from threading import Condition
1✔
27
from typing import (
1✔
28
    Dict, Iterable, Optional, Sequence, Tuple, Type,
29
    TypeVar, Union, cast, final)
30

31
import ebrains_drive  # type: ignore[import]
1✔
32
from numpy import __version__ as numpy_version
1✔
33
import requests
1✔
34

35
from spinn_utilities import __version__ as spinn_utils_version
1✔
36
from spinn_utilities.config_holder import (
1✔
37
    get_config_bool, get_config_int, get_config_str, get_config_str_or_none,
38
    is_config_none, set_config)
39
from spinn_utilities.log import FormatAdapter
1✔
40
from spinn_utilities.typing.coords import XY
1✔
41

42
from spinn_machine import __version__ as spinn_machine_version
1✔
43
from spinn_machine import CoreSubsets
1✔
44

45
from spinnman import __version__ as spinnman_version
1✔
46
from spinnman.exceptions import SpiNNManCoresNotInStateException
1✔
47
from spinnman.model.cpu_infos import CPUInfos
1✔
48
from spinnman.model.enums import CPUState, ExecutableType
1✔
49

50
from spalloc_client import (  # type: ignore[import]
1✔
51
    __version__ as spalloc_version)
52

53
from pacman import __version__ as pacman_version
1✔
54
from pacman.exceptions import PacmanPlaceException
1✔
55
from pacman.model.graphs.application import ApplicationEdge, ApplicationVertex
1✔
56
from pacman.model.graphs import AbstractVirtual
1✔
57
from pacman.model.resources import AbstractSDRAM
1✔
58
from pacman.model.partitioner_splitters.splitter_reset import splitter_reset
1✔
59
from pacman.model.placements import Placements
1✔
60
from pacman.model.routing_tables import MulticastRoutingTables
1✔
61
from pacman.operations.fixed_route_router import fixed_route_router
1✔
62
from pacman.operations.partition_algorithms import splitter_partitioner
1✔
63
from pacman.operations.placer_algorithms import place_application_graph
1✔
64
from pacman.operations.router_algorithms import route_application_graph
1✔
65
from pacman.operations.router_compressors import (
1✔
66
    pair_compressor, range_compressor)
67
from pacman.operations.router_compressors.ordered_covering_router_compressor \
1✔
68
    import ordered_covering_compressor
69
from pacman.operations.routing_info_allocator_algorithms.\
1✔
70
    zoned_routing_info_allocator import (flexible_allocate, global_allocate)
71
from pacman.operations.routing_table_generators import (
1✔
72
    basic_routing_table_generator, merged_routing_table_generator)
73
from pacman.operations.tag_allocator_algorithms import basic_tag_allocator
1✔
74

75
from spinn_front_end_common import __version__ as fec_version
1✔
76
from spinn_front_end_common import common_model_binaries
1✔
77
from spinn_front_end_common.abstract_models import (
1✔
78
    AbstractVertexWithEdgeToDependentVertices,
79
    AbstractCanReset)
80
from spinn_front_end_common.abstract_models.impl import (
1✔
81
    MachineAllocationController)
82
from spinn_front_end_common.data.fec_data_view import FecDataView
1✔
83
from spinn_front_end_common.interface.buffer_management import BufferManager
1✔
84
from spinn_front_end_common.interface.buffer_management.storage_objects \
1✔
85
    import BufferDatabase
86
from spinn_front_end_common.interface.config_handler import ConfigHandler
1✔
87
from spinn_front_end_common.interface.interface_functions import (
1✔
88
    application_finisher, application_runner,
89
    chip_io_buf_clearer, chip_io_buf_extractor,
90
    chip_provenance_updater, chip_runtime_updater, compute_energy_used,
91
    create_notification_protocol, database_interface,
92
    reload_dsg_regions, energy_provenance_reporter,
93
    load_application_data_specs, load_system_data_specs,
94
    graph_binary_gatherer, graph_data_specification_writer,
95
    graph_provenance_gatherer,
96
    host_based_bit_field_router_compressor, hbp_allocator,
97
    insert_chip_power_monitors_to_graphs,
98
    insert_extra_monitor_vertices_to_graphs, split_lpg_vertices,
99
    load_app_images, load_fixed_routes, load_sys_images,
100
    locate_executable_start_type, machine_generator,
101
    placements_provenance_gatherer, profile_data_gatherer,
102
    read_routing_tables_from_machine, router_provenance_gatherer,
103
    routing_setup, routing_table_loader,
104
    sdram_outgoing_partition_allocator, spalloc_allocator,
105
    system_multicast_routing_generator,
106
    tags_loader, virtual_machine_generator, add_command_senders)
107
from spinn_front_end_common.interface.interface_functions.\
1✔
108
    host_no_bitfield_router_compression import (
109
        ordered_covering_compression, pair_compression)
110
from spinn_front_end_common.interface.provenance import (
1✔
111
    FecTimer, GlobalProvenance, TimerCategory, TimerWork)
112
from spinn_front_end_common.interface.splitter_selectors import (
1✔
113
    splitter_selector)
114
from spinn_front_end_common.interface.java_caller import JavaCaller
1✔
115
from spinn_front_end_common.utilities.exceptions import ConfigurationException
1✔
116
from spinn_front_end_common.utilities.report_functions import (
1✔
117
    bitfield_compressor_report, board_chip_report, EnergyReport,
118
    fixed_route_from_machine_report,
119
    generate_routing_compression_checker_report, memory_map_on_host_report,
120
    memory_map_on_host_chip_report, network_specification,
121
    tags_from_machine_report,
122
    write_json_machine, write_json_placements,
123
    write_json_routing_tables, drift_report)
124
from spinn_front_end_common.utilities.iobuf_extractor import IOBufExtractor
1✔
125
from spinn_front_end_common.utility_models import (
1✔
126
    DataSpeedUpPacketGatherMachineVertex)
127
from spinn_front_end_common.utilities.report_functions.reports import (
1✔
128
    generate_comparison_router_report, partitioner_report,
129
    placer_reports_with_application_graph,
130
    router_compressed_summary_report, routing_info_report,
131
    router_report_from_compressed_router_tables,
132
    router_report_from_paths,
133
    router_report_from_router_tables, router_summary_report,
134
    sdram_usage_report_per_chip,
135
    tag_allocator_report)
136
from spinn_front_end_common.data.fec_data_writer import FecDataWriter
1✔
137

138
try:
1✔
139
    from scipy import __version__ as scipy_version
1✔
140
except ImportError:
×
141
    scipy_version = "scipy not installed"
×
142

143
# Module logger; FormatAdapter enables lazy "{}"-style message formatting
logger = FormatAdapter(logging.getLogger(__name__))
# Generic type variable for typing helpers in this module
_T = TypeVar("_T")

# Matches an EBRAINS drive working directory of the form .../shared/<folder>
SHARED_PATH = re.compile(r".*\/shared\/([^\/]+)")
# Index of the regex group in SHARED_PATH that captures the folder name
SHARED_GROUP = 1
# Matches .../Shared with (all|groups|me)/<folder> drive paths
SHARED_WITH_PATH = re.compile(r".*\/Shared with (all|groups|me)\/([^\/]+)")
# Index of the regex group in SHARED_WITH_PATH that captures the folder name
SHARED_WITH_GROUP = 2
150

151

152
class AbstractSpinnakerBase(ConfigHandler):
    """
    Main interface into the tools logic flow.
    """
    # pylint: disable=broad-except

    __slots__ = (
        # Condition object used for waiting for stop
        # Set during init and then used, but never replaced with a new object
        "_state_condition",

        # Set when run_until_complete is specified by the user
        "_run_until_complete",

        # When True, the Control-C handler re-raises KeyboardInterrupt
        # instead of shutting the simulation down
        "_raise_keyboard_interrupt",

        # original value which is used in exception handling and control c
        "__sys_excepthook",

        # All beyond this point new for no extractor
        # The data is not new but now it is held direct and not via inputs

        # Flag to say whether compressed routing tables are on the machine
        # TODO remove this when the data change only algorithms are done
        "_multicast_routes_loaded")
178

179
    def __init__(
            self, data_writer_cls: Optional[Type[FecDataWriter]] = None):
        """
        Sets up the basic state and timers; does not talk to any hardware.

        :param FecDataWriter data_writer_cls:
            The Global data writer class, or `None` to use the default
        """
        # pylint: disable=too-many-arguments
        super().__init__(data_writer_cls)

        # Everything up to the first run() counts as WAITING/SETTING_UP time
        FecTimer.start_category(TimerCategory.WAITING)
        FecTimer.start_category(TimerCategory.SETTING_UP)

        # output locations of binaries to be searched for end user info
        logger.info(
            "Will search these locations for binaries: {}",
            self._data_writer.get_executable_finder().binary_paths)

        self._multicast_routes_loaded = False

        # holder for timing and running related values
        self._run_until_complete = False
        self._state_condition = Condition()

        # folders
        self._set_up_report_specifics()

        # Setup for signal handling
        self._raise_keyboard_interrupt = False

        self._create_version_provenance()

        # Remember the interpreter's excepthook so exception_handler can
        # chain to it later
        self.__sys_excepthook = sys.excepthook

        FecTimer.setup(self)

        self._data_writer.register_binary_search_path(
            os.path.dirname(common_model_binaries.__file__))

        self._data_writer.set_machine_generator(self._get_machine)
        FecTimer.end_category(TimerCategory.SETTING_UP)
219

220
    def _hard_reset(self) -> None:
        """
        This clears all data that is no longer valid after a hard reset.

        Stops any application still on the machine, closes the machine
        allocation controller, hard-resets the global data writer, and
        marks the multicast routing tables as no longer loaded.
        """
        if self._data_writer.has_transceiver():
            self._data_writer.get_transceiver().stop_application(
                self._data_writer.get_app_id())
        self.__close_allocation_controller()
        self._data_writer.hard_reset()
        self._multicast_routes_loaded = False
230

231
    def _machine_clear(self) -> None:
        """
        Does nothing here; presumably an override hook for subclasses that
        need to clear machine state — confirm against subclasses.
        """
        pass
233

234
    def _setup_java_caller(self) -> None:
        """
        If the [Java]use_java config flag is set, creates the JavaCaller
        and stores it in the global data writer.
        """
        if get_config_bool("Java", "use_java"):
            self._data_writer.set_java_caller(JavaCaller())
237

238
    def __signal_handler(self, _signal, _frame) -> None:
        """
        Handles closing down of script via keyboard interrupt (SIGINT).

        :param _signal: the signal received (ignored)
        :param _frame: frame executed in (ignored)
        """
        # If we are to raise the keyboard interrupt, do so
        if self._raise_keyboard_interrupt:
            raise KeyboardInterrupt

        # Otherwise shut everything down as cleanly as possible
        logger.error("User has cancelled simulation")
        self._shutdown()
251

252
    @property
    def __bearer_token(self) -> Optional[str]:
        """
        The OIDC bearer token to use for authenticated services, if any.

        :return: The OIDC bearer token
        :rtype: str or None
        """
        # Inside Jupyter, ask the hub's access-token service for a token
        token = os.getenv("JUPYTERHUB_API_TOKEN")
        host = os.getenv("JUPYTERHUB_SERVICE_HOST")
        port = os.getenv("JUPYTERHUB_SERVICE_PORT")
        if None not in (token, host, port):
            url = (f"http://{host}:{port}/services/"
                   "access-token-service/access-token")
            response = requests.get(
                url, headers={"Authorization": f"Token {token}"},
                timeout=10)
            return response.json().get('access_token')

        # Try a simple environment variable, or None if that doesn't exist
        return os.getenv("OIDC_BEARER_TOKEN")
272

273
    @property
    def __group_collab_or_job(self) -> Dict[str, str]:
        """
        Identification details to attach when requesting a job.

        :return: The group, collab, or NMPI Job ID to associate with jobs
        :rtype: dict()
        """
        # An NMPI job identifier takes priority over everything else
        nmpi_job = os.getenv("NMPI_JOB_ID")
        if nmpi_job is not None and nmpi_job != "":
            nmpi_user = os.getenv("NMPI_USER")
            if nmpi_user is not None and nmpi_user != "":
                logger.info("Requesting job for NMPI job {}, user {}",
                            nmpi_job, nmpi_user)
                return {"nmpi_job": nmpi_job, "nmpi_user": nmpi_user}
            logger.info("Requesting spalloc job for NMPI job {}", nmpi_job)
            return {"nmpi_job": nmpi_job}

        # Next, see if the working directory names an EBRAINS collab folder
        cwd = os.getcwd()
        for pattern, group_index in (
                (SHARED_PATH, SHARED_GROUP),
                (SHARED_WITH_PATH, SHARED_WITH_GROUP)):
            match_obj = pattern.match(cwd)
            if match_obj:
                collab = self.__get_collab_id_from_folder(
                    match_obj.group(group_index))
                if collab is not None:
                    return collab

        # Try to use the config to get a group
        group = get_config_str_or_none("Machine", "spalloc_group")
        if group is not None:
            return {"group": group}

        # Nothing ventured, nothing gained
        return {}
312

313
    def __get_collab_id_from_folder(
            self, folder: str) -> Optional[Dict[str, str]]:
        """
        Currently hacky way to get the EBRAINS collab id from the
        drive folder, replicated from the NMPI collab template.

        :param str folder: name of the drive folder to look up
        :return: ``{"collab": <id>}`` on success, ``{}`` when the
            repository cannot be uniquely identified, or `None` when no
            bearer token is available
        :rtype: dict(str, str) or None
        """
        token = self.__bearer_token
        if token is None:
            return None
        ebrains_drive_client = ebrains_drive.connect(token=token)
        repo_by_title = ebrains_drive_client.repos.get_repos_by_name(folder)
        if len(repo_by_title) != 1:
            # Lazy {}-style arguments, consistent with the FormatAdapter
            # logging used throughout this module (f-strings format eagerly
            # even when the message is never emitted)
            logger.warning(
                "The repository for collab {} could not be"
                " found; continuing as if not in a collaboratory", folder)
            return {}
        # Owner is formatted as collab-<collab_id>-<permission>, and we want
        # to extract the <collab-id>
        owner = repo_by_title[0].owner
        collab_id = owner[:owner.rindex("-")]
        collab_id = collab_id[collab_id.find("-") + 1:]
        logger.info("Requesting job in collaboratory {}", collab_id)
        return {"collab": collab_id}
335

336
    def exception_handler(
            self, exc_type: Type[BaseException], value: BaseException,
            traceback_obj: Optional[types.TracebackType]):
        """
        Handler of uncaught exceptions: shuts the tools down, then chains
        to the interpreter's original `sys.excepthook`.

        :param type exc_type: the type of exception received
        :param Exception value: the value of the exception
        :param traceback traceback_obj: the trace back stuff
        """
        logger.error("Shutdown on exception")
        self._shutdown()
        # Delegate to the excepthook captured at construction time
        return self.__sys_excepthook(exc_type, value, traceback_obj)
349

350
    def _should_run(self) -> bool:
        """
        Checks if the simulation should run.

        Will warn the user if there is no need to run

        :return: True if and only if one of the graphs has vertices in it
        :raises ConfigurationException: If the current state does not
            support a new run call
        """
        has_vertices = self._data_writer.get_n_vertices() > 0
        if not has_vertices:
            logger.warning(
                "Your graph has no vertices in it. "
                "Therefore the run call will exit immediately.")
        return has_vertices
366

367
    def run_until_complete(self, n_steps: Optional[int] = None):
        """
        Run a simulation until it completes.

        :param int n_steps:
            If not `None`, this specifies that the simulation should be
            requested to run for the given number of steps.  The host will
            still wait until the simulation itself says it has completed.
        """
        FecTimer.start_category(TimerCategory.RUN_OTHER)
        # Remember the mode; a subsequent plain run() call is rejected
        self._run_until_complete = True
        self._run(n_steps, sync_time=0.0)
        FecTimer.end_category(TimerCategory.RUN_OTHER)
380

381
    def run(self, run_time: Optional[float], sync_time: float = 0):
        """
        Run a simulation for a fixed amount of time.

        :param run_time: the run duration in milliseconds, or `None` to
            run until stopped
        :type run_time: float or None
        :param float sync_time:
            If not 0, this specifies that the simulation should pause after
            this duration.  The continue_simulation() method must then be
            called for the simulation to continue.
        :raises NotImplementedError:
            if called after run_until_complete()
        """
        FecTimer.start_category(TimerCategory.RUN_OTHER)
        if self._run_until_complete:
            raise NotImplementedError("run after run_until_complete")
        self._run(run_time, sync_time)
        FecTimer.end_category(TimerCategory.RUN_OTHER)
396

397
    def __timesteps(self, time_in_ms: float) -> int:
        """
        Get a number of timesteps for a given time in milliseconds.

        Rounds up to a whole number of timesteps, warning the user when
        the rounding actually changed the requested duration.

        :return: The number of timesteps
        :rtype: int
        """
        step_ms = self._data_writer.get_simulation_time_step_ms()
        n_steps = int(math.ceil(time_in_ms / step_ms))
        rounded_ms = n_steps * step_ms

        # Tolerate tiny float representation errors before complaining
        if abs(time_in_ms - rounded_ms) > 0.00001:
            logger.warning(
                "Time of {}ms "
                "is not a multiple of the machine time step of {}ms "
                "and has therefore been rounded up to {}ms",
                time_in_ms, step_ms, rounded_ms)
        return n_steps
416

417
    def _calc_run_time(self, run_time: Optional[float]) -> Union[
            Tuple[int, float], Tuple[None, None]]:
        """
        Calculates n_machine_time_steps and total_run_time based on run_time
        and machine_time_step.

        This method rounds the run up to the next timestep as discussed in
        https://github.com/SpiNNakerManchester/sPyNNaker/issues/149

        If run_time is `None` (run forever) both values will be `None`

        :param run_time: time user requested to run for in milliseconds
        :type run_time: float or None
        :return: n_machine_time_steps as a whole int and
            total_run_time in milliseconds
        :rtype: tuple(int,float) or tuple(None,None)
        """
        if run_time is None:
            return None, None
        n_machine_time_steps = self.__timesteps(run_time)
        # Total is relative to the start of this run, not of this call
        total_run_timesteps = (
            (self._data_writer.get_current_run_timesteps() or 0) +
            n_machine_time_steps)
        total_run_time = (
            total_run_timesteps *
            self._data_writer.get_hardware_time_step_ms())

        # Lazy {}-style arguments, consistent with FormatAdapter logging
        # elsewhere in this module (f-strings format even if never emitted)
        logger.info(
            "Simulating for {} {} ms timesteps "
            "using a hardware timestep of {} us",
            n_machine_time_steps,
            self._data_writer.get_simulation_time_step_ms(),
            self._data_writer.get_hardware_time_step_us())
        return n_machine_time_steps, total_run_time
450

451
    def _run(self, run_time: Optional[float], sync_time: float):
        """
        Runs one simulation bout, stopping and shutting down on error.

        :param run_time: the run duration in milliseconds, or `None`
        :type run_time: float or None
        :param float sync_time:
            time between synchronisations in milliseconds, or 0 to disable
        """
        self._data_writer.start_run()

        try:
            self.__run(run_time, sync_time)
            self._data_writer.finish_run()
        except Exception:
            # if in debug mode, do not shut down machine
            if get_config_str("Mode", "mode") != "Debug":
                try:
                    self.stop()
                except Exception as stop_e:
                    # Lazy {}-style argument, consistent with the
                    # FormatAdapter logging used throughout this module
                    logger.exception(
                        "Error {} when attempting to stop", stop_e)
            self._data_writer.shut_down()
            # Re-raise the original failure for the caller to see
            raise
466

467
    @staticmethod
    def __is_main_thread() -> bool:
        """
        :return: Whether this is the main thread.
        :rtype: bool
        """
        main_thread = threading.main_thread()
        return main_thread.ident == threading.get_ident()
474

475
    def __run(self, run_time: Optional[float], sync_time: float):
        """
        The main internal run function.

        :param run_time: the run duration in milliseconds, or `None` to
            run until stopped/complete.
        :type run_time: float or None
        :param int sync_time:
            the time in milliseconds between synchronisations, or 0 to
            disable.
        :raises NotImplementedError: when the graph changed between runs
            without a reset, or when a binary that does not support auto
            pause and resume is run more than once
        :raises ConfigurationException: when per-timestep SDRAM usage
            caps the possible run time below what was requested
        """
        if not self._should_run():
            return

        # verify that we can keep doing auto pause and resume
        if self._data_writer.is_ran_ever():
            can_keep_running = all(
                executable_type.supports_auto_pause_and_resume
                for executable_type in
                self._data_writer.get_executable_types())
            if not can_keep_running:
                raise NotImplementedError(
                    "Only binaries that use the simulation interface can be"
                    " run more than once")

        self._adjust_config(run_time)

        # Install the Control-C handler (only valid from the main thread)
        if self.__is_main_thread():
            signal.signal(signal.SIGINT, self.__signal_handler)
            self._raise_keyboard_interrupt = True
            sys.excepthook = self.__sys_excepthook

        logger.info("Starting execution process")

        n_machine_time_steps, total_run_time = self._calc_run_time(run_time)
        if FecDataView.has_allocation_controller():
            FecDataView.get_allocation_controller().extend_allocation(
                total_run_time or 0.0)

        n_sync_steps = self.__timesteps(sync_time)

        # If we have never run before, or the graph has changed,
        # start by performing mapping
        if (self._data_writer.get_requires_mapping() and
                self._data_writer.is_ran_last()):
            self.stop()
            raise NotImplementedError(
                "The network cannot be changed between runs without"
                " resetting")

        # If we have reset and the graph has changed, stop any running
        # application
        if (self._data_writer.get_requires_data_generation() and
                self._data_writer.has_transceiver()):
            self._data_writer.get_transceiver().stop_application(
                self._data_writer.get_app_id())
            self._data_writer.reset_sync_signal()
        # build the graphs to modify with system requirements
        if self._data_writer.get_requires_mapping():
            if self._data_writer.is_soft_reset():
                # wipe out stuff associated with past mapping
                self._hard_reset()
            FecTimer.setup(self)

            self._add_dependent_verts_and_edges_for_application_graph()

            if get_config_bool("Buffers", "use_auto_pause_and_resume"):
                self._data_writer.set_plan_n_timesteps(get_config_int(
                    "Buffers", "minimum_auto_time_steps"))
            else:
                self._data_writer.set_plan_n_timesteps(n_machine_time_steps)

            self._do_mapping(total_run_time)

        if not self._data_writer.is_ran_last():
            self._do_write_metadata()

        # Check if anything has per-timestep SDRAM usage
        # NOTE: "placemements" spelling is the data-view API's, not a typo
        # introduced here
        is_per_timestep_sdram = any(
            placement.vertex.sdram_required.per_timestep
            for placement in self._data_writer.iterate_placemements())

        # Disable auto pause and resume if the binary can't do it
        if not get_config_bool("Machine", "virtual_board"):
            for executable_type in self._data_writer.get_executable_types():
                if not executable_type.supports_auto_pause_and_resume:
                    set_config("Buffers", "use_auto_pause_and_resume", "False")
                    break

        # Work out the maximum run duration given all recordings
        if not self._data_writer.has_max_run_time_steps():
            self._data_writer.set_max_run_time_steps(
                self._deduce_data_n_timesteps())

        # Work out an array of timesteps to perform
        steps: Optional[Sequence[Optional[int]]] = None
        if (not get_config_bool("Buffers", "use_auto_pause_and_resume")
                or not is_per_timestep_sdram):
            # Runs should only be in units of max_run_time_steps at most
            if is_per_timestep_sdram and (
                    n_machine_time_steps is None
                    or (self._data_writer.get_max_run_time_steps()
                        < n_machine_time_steps)):
                raise ConfigurationException(
                    "The SDRAM required by one or more vertices is based on "
                    "the run time, so the run time is limited to "
                    f"{self._data_writer.get_max_run_time_steps()} time steps")

            steps = [n_machine_time_steps]
        elif run_time is not None:
            # With auto pause and resume, any time step is possible but run
            # time more than the first will guarantee that run will be called
            # more than once
            steps = self._generate_steps(n_machine_time_steps)

        # requires data_generation includes never run and requires_mapping
        if self._data_writer.get_requires_data_generation():
            self._do_load()

        # Run for each of the given steps
        if run_time is not None:
            assert steps is not None
            logger.info("Running for {} steps for a total of {}ms",
                        len(steps), run_time)
            for step in steps:
                run_step = self._data_writer.next_run_step()
                logger.info(f"Run {run_step} of {len(steps)}")
                self._do_run(step, n_sync_steps)
            self._data_writer.clear_run_steps()
        elif run_time is None and self._run_until_complete:
            logger.info("Running until complete")
            self._do_run(None, n_sync_steps)
        elif (not get_config_bool(
                "Buffers", "use_auto_pause_and_resume") or
                not is_per_timestep_sdram):
            # Run forever in one go; block until something requests a stop
            logger.info("Running forever")
            self._do_run(None, n_sync_steps)
            logger.info("Waiting for stop request")
            with self._state_condition:
                while self._data_writer.is_no_stop_requested():
                    self._state_condition.wait()
        else:
            # Run forever, but SDRAM limits force repeated bounded runs
            logger.info("Running forever in steps of {}ms",
                        self._data_writer.get_max_run_time_steps())
            while self._data_writer.is_no_stop_requested():
                logger.info(f"Run {self._data_writer.next_run_step()}")
                self._do_run(
                    self._data_writer.get_max_run_time_steps(), n_sync_steps)
            self._data_writer.clear_run_steps()

        # Indicate that the signal handler needs to act
        if self.__is_main_thread():
            self._raise_keyboard_interrupt = False
            sys.excepthook = self.exception_handler
627

628
    @final
    def _add_commands_to_command_sender(self, system_placements: Placements):
        """
        Creates command sender vertices and adds them, with their edges
        and partitions, to the application graph.

        :param ~pacman.model.placements.Placements system_placements:
            placements passed through to `add_command_senders`
            (presumably extended with the new senders — confirm against
            `add_command_senders`)
        """
        with FecTimer("Command Sender Adder", TimerWork.OTHER):
            all_command_senders = add_command_senders(system_placements)
            # add the edges from the command senders to the dependent vertices
            for command_sender in all_command_senders:
                self._data_writer.add_vertex(command_sender)
                edges, partition_ids = command_sender.edges_and_partitions()
                for edge, partition_id in zip(edges, partition_ids):
                    self._data_writer.add_edge(edge, partition_id)
643

644
    @final
    def _add_dependent_verts_and_edges_for_application_graph(self) -> None:
        """
        Ensures every dependent vertex reported by an
        AbstractVertexWithEdgeToDependentVertices is present in the graph,
        adding the vertex and an edge per reported partition if missing.
        """
        # cache vertices to allow insertion during iteration
        vertices = list(self._data_writer.get_vertices_by_type(
                AbstractVertexWithEdgeToDependentVertices))
        for vertex in vertices:
            v = cast(ApplicationVertex, vertex)
            for dpt_vtx in vertex.dependent_vertices():
                if dpt_vtx.has_been_added_to_graph():
                    continue
                self._data_writer.add_vertex(dpt_vtx)
                edge_partition_ids = vertex.\
                    edge_partition_identifiers_for_dependent_vertex(dpt_vtx)
                for edge_identifier in edge_partition_ids:
                    self._data_writer.add_edge(
                        ApplicationEdge(v, dpt_vtx), edge_identifier)
660

661
    @final
    def _deduce_data_n_timesteps(self) -> int:
        """
        Operates the auto pause and resume functionality by figuring out
        how many timer ticks a simulation can run before SDRAM runs out,
        and breaks simulation into chunks of that long.

        :return: max time a simulation can run; `sys.maxsize` when nothing
            uses per-timestep SDRAM (i.e. effectively unbounded).
        :rtype: int
        :raises ~pacman.exceptions.PacmanPlaceException:
            if a chip's fixed SDRAM allocation already exceeds its capacity
        """
        # Go through the placements and find how much SDRAM is used
        # on each chip
        usage_by_chip: Dict[XY, AbstractSDRAM] = dict()

        for place in self._data_writer.iterate_placemements():
            # Virtual vertices consume no real SDRAM
            if isinstance(place.vertex, AbstractVirtual):
                continue

            sdram = place.vertex.sdram_required
            if (place.x, place.y) in usage_by_chip:
                usage_by_chip[place.x, place.y] += sdram
            else:
                usage_by_chip[place.x, place.y] = sdram

        # Go through the chips and divide up the remaining SDRAM, finding
        # the minimum number of machine timesteps to assign
        max_time_steps = sys.maxsize
        for (x, y), sdram in usage_by_chip.items():
            size = self._data_writer.get_chip_at(x, y).sdram
            if sdram.fixed > size:
                raise PacmanPlaceException(
                    f"Too much SDRAM has been allocated on chip {x}, {y}: "
                    f"{sdram.fixed} of {size}")
            if sdram.per_timestep:
                max_this_chip = int((size - sdram.fixed) // sdram.per_timestep)
                max_time_steps = min(max_time_steps, max_this_chip)

        return max_time_steps
699

700
    def _generate_steps(self, n_steps: Optional[int]) -> Sequence[int]:
1✔
701
        """
702
        Generates the list of "timer" runs. These are usually in terms of
703
        time steps, but need not be.
704

705
        :param int n_steps: the total runtime in machine time steps
706
        :return: list of time step lengths
707
        :rtype: list(int)
708
        """
709
        if n_steps is None or n_steps == 0:
×
710
            return [0]
×
711
        n_steps_per_segment = self._data_writer.get_max_run_time_steps()
×
712
        n_full_iterations = int(math.floor(n_steps / n_steps_per_segment))
×
713
        left_over_steps = n_steps - n_full_iterations * n_steps_per_segment
×
714
        steps = [int(n_steps_per_segment)] * n_full_iterations
×
715
        if left_over_steps:
×
716
            steps.append(int(left_over_steps))
×
717
        return steps
×
718

719
    def _execute_get_virtual_machine(self) -> None:
        """
        Creates a virtual machine, timed and logged.

        Sets the "machine" value and marks the IP address as virtual.
        """
        with FecTimer("Virtual machine generator", TimerWork.OTHER):
            virtual = virtual_machine_generator()
            self._data_writer.set_machine(virtual)
            self._data_writer.set_ipaddress("virtual")
×
728

729
    def _execute_allocator(self, total_run_time: Optional[float]) -> Optional[
            Tuple[str, int, Optional[str], bool, bool, Optional[Dict[XY, str]],
                  MachineAllocationController]]:
        """
        Runs, times and logs the SpallocAllocator or HBPAllocator if required.

        Does nothing (returns `None`) when a machine already exists or when
        neither allocator is configured.

        :param total_run_time: The total run time to request
        :type total_run_time: float or None
        :return: `None`, or a 7-tuple of machine name, machine version,
            BMP details (if any), reset-on-startup flag, auto-detect-BMP
            flag, SCAMP connection details (if any), and the allocation
            controller
        :rtype: None or tuple(str, int, str or None, bool, bool,
            dict(tuple(int,int),str) or None, MachineAllocationController)
        """
        if self._data_writer.has_machine():
            return None
        # Spalloc takes priority over the HBP/EBRAINS allocator
        if not is_config_none("Machine", "spalloc_server"):
            with FecTimer("SpallocAllocator", TimerWork.OTHER):
                return spalloc_allocator(
                    self.__bearer_token, **self.__group_collab_or_job)
        if not is_config_none("Machine", "remote_spinnaker_url"):
            with FecTimer("HBPAllocator", TimerWork.OTHER):
                # TODO: Would passing the bearer token to this ever make sense?
                return hbp_allocator(total_run_time)
        return None
×
754

755
    def _execute_machine_generator(self, allocator_data: Optional[Tuple[
            str, int, Optional[str], bool, bool, Optional[Dict[XY, str]],
            MachineAllocationController]]) -> None:
        """
        Runs, times and logs the MachineGenerator if required.

        May set the "machine" value if not already set.  Machine details
        come either from explicit cfg values (when ``machine_name`` is set)
        or from the allocator data; with neither, this does nothing.

        :param allocator_data: `None` or a 7-tuple of
            machine name, machine version, BMP details (if any),
            reset-on-startup flag, auto-detect-BMP flag,
            SCAMP connection details (if any), and the allocation controller
        :type allocator_data: None or tuple(str, int, str or None, bool,
            bool, dict(tuple(int,int),str) or None,
            MachineAllocationController)
        """
        if self._data_writer.has_machine():
            return
        machine_name = get_config_str_or_none("Machine", "machine_name")
        if machine_name is not None:
            # A directly-named machine: take all details from cfg
            self._data_writer.set_ipaddress(machine_name)
            bmp_details = get_config_str_or_none("Machine", "bmp_names")
            auto_detect_bmp = get_config_bool("Machine", "auto_detect_bmp")
            scamp_connection_data = None
            reset_machine = get_config_bool(
                "Machine", "reset_machine_on_startup")
            board_version = FecDataView.get_machine_version().number

        elif allocator_data:
            # An allocated machine: take details from the allocator result
            (ipaddress, board_version, bmp_details,
             reset_machine, auto_detect_bmp, scamp_connection_data,
             machine_allocation_controller) = allocator_data
            self._data_writer.set_ipaddress(ipaddress)
            self._data_writer.set_allocation_controller(
                machine_allocation_controller)
        else:
            # No information available; leave machine unset
            return

        with FecTimer("Machine generator", TimerWork.GET_MACHINE):
            # Flags default to False when cfg returned None
            machine, transceiver = machine_generator(
                bmp_details, board_version,
                auto_detect_bmp or False, scamp_connection_data,
                reset_machine or False)
            self._data_writer.set_transceiver(transceiver)
            self._data_writer.set_machine(machine)
×
800

801
    def _get_known_machine(self, total_run_time: float = 0.0) -> None:
        """
        Ensures a machine description is available, creating one if needed.

        Uses the virtual machine generator when ``virtual_board`` is set;
        otherwise tries the allocators followed by the machine generator.
        Does nothing when a machine already exists.  May leave the machine
        unset if no source of machine information is configured.

        :param float total_run_time: The total run time to request
        """
        if not self._data_writer.has_machine():
            if get_config_bool("Machine", "virtual_board"):
                self._execute_get_virtual_machine()
            else:
                allocator_data = self._execute_allocator(total_run_time)
                self._execute_machine_generator(allocator_data)
×
814

815
    def _get_machine(self) -> None:
        """
        The factory method to get a machine.

        Upgrades a pending soft reset to a hard one (a new machine is
        needed), then obtains a machine, raising if none can be supplied.
        """
        FecTimer.start_category(TimerCategory.GET_MACHINE, True)
        if (self._data_writer.is_user_mode()
                and self._data_writer.is_soft_reset()):
            # A machine request after a soft reset requires a new machine,
            # so escalate to a hard reset
            logger.warning(
                "Calling Get machine after a reset force a hard reset and "
                "therefore generate a new machine")
            self._hard_reset()
        self._get_known_machine()
        if not self._data_writer.has_machine():
            raise ConfigurationException(
                "Not enough information provided to supply a machine")
        FecTimer.end_category(TimerCategory.GET_MACHINE)
×
832

833
    def _create_version_provenance(self) -> None:
        """
        Add the version information to the provenance data at the start.
        """
        # Name/version pairs, recorded in dependency order
        versions = (
            ("spinn_utilities_version", spinn_utils_version),
            ("spinn_machine_version", spinn_machine_version),
            ("spalloc_version", spalloc_version),
            ("spinnman_version", spinnman_version),
            ("pacman_version", pacman_version),
            ("front_end_common_version", fec_version),
            ("numpy_version", numpy_version),
            ("scipy_version", scipy_version))
        with GlobalProvenance() as db:
            for description, version in versions:
                db.insert_version(description, version)
1✔
846

847
    def _do_extra_mapping_algorithms(self) -> None:
        """
        Allows overriding classes to add algorithms.

        Deliberately empty here; subclasses override this to run extra
        mapping steps at the start of the mapping phase.
        """
851

852
    def _json_machine(self) -> None:
        """
        Writes the machine as JSON, timed and logged, when cfg requests it.
        """
        with FecTimer("Json machine", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false("Reports", "write_json_machine"):
                write_json_machine()
×
860

861
    def _report_network_specification(self) -> None:
        """
        Writes the network specification report, timed and logged, when
        cfg requests it.
        """
        with FecTimer(
                "Network Specification report", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_network_specification_report"):
                network_specification()
×
871

872
    def _execute_split_lpg_vertices(self, system_placements: Placements):
        """
        Splits the live-packet-gatherer vertices, timed and logged.
        """
        timer = FecTimer("Split Live Gather Vertices", TimerWork.OTHER)
        with timer:
            split_lpg_vertices(system_placements)
×
878

879
    def _report_board_chip(self) -> None:
        """
        Writes the board chip report (plus the machine allocation report
        when an allocation controller exists), timed and logged, when cfg
        requests it.
        """
        with FecTimer("Board chip report", TimerWork.REPORT) as timer:
            if timer.skip_if_cfg_false("Reports", "write_board_chip_report"):
                return
            board_chip_report()
            if not FecDataView.has_allocation_controller():
                return
            report_path = os.path.join(
                FecDataView.get_run_dir_path(), "machine_allocation.rpt")
            FecDataView.get_allocation_controller().make_report(report_path)
892

893
    def _execute_splitter_reset(self) -> None:
        """
        Resets all the splitters, timed and logged.
        """
        timer = FecTimer("Splitter reset", TimerWork.OTHER)
        with timer:
            splitter_reset()
×
899

900
    # Overridden by sPyNNaker to choose an extended algorithm
    def _execute_splitter_selector(self) -> None:
        """
        Selects splitters for the vertices, timed and logged.
        """
        timer = FecTimer("Splitter selector", TimerWork.OTHER)
        with timer:
            splitter_selector()
×
907

908
    def _execute_delay_support_adder(self) -> None:
        """
        Stub to allow sPyNNaker to add delay supports.

        Deliberately empty here; sPyNNaker overrides this method.
        """
912

913
    # Overridden by sPyNNaker to choose a different algorithm
    def _execute_splitter_partitioner(self) -> None:
        """
        Partitions the graph with the splitters, timed and logged; a
        no-op for an empty graph.
        """
        if self._data_writer.get_n_vertices() == 0:
            return
        with FecTimer("Splitter partitioner", TimerWork.OTHER):
            n_chips = splitter_partitioner()
            self._data_writer.set_n_chips_in_graph(n_chips)
×
922

923
    def _execute_insert_chip_power_monitors(
            self, system_placements: Placements):
        """
        Inserts chip power monitor vertices, timed and logged, when the
        energy report is requested.
        """
        with FecTimer("Insert chip power monitors", TimerWork.OTHER) as timer:
            if not timer.skip_if_cfg_false("Reports", "write_energy_report"):
                insert_chip_power_monitors_to_graphs(system_placements)
×
933

934
    @final
    def _execute_insert_extra_monitor_vertices(
            self, system_placements: Placements):
        """
        Run, time and log the InsertExtraMonitorVerticesToGraphs if required.

        Sets the gatherer and monitor maps when advanced monitor support
        or reinjection is enabled; otherwise does nothing.

        :param ~pacman.model.placements.Placements system_placements:
            existing placements to be added to
        """
        with FecTimer(
                "Insert extra monitor vertices", TimerWork.OTHER) as timer:
            if timer.skip_if_cfgs_false(
                    "Machine", "enable_advanced_monitor_support",
                    "enable_reinjection"):
                return
            # inserter checks for None app graph not an empty one
            # Fix: do the insertion INSIDE the timer block; it was
            # previously dedented outside, so the actual work was not
            # included in this step's recorded time (unlike every sibling
            # _execute_* method).
            gather_map, monitor_map = insert_extra_monitor_vertices_to_graphs(
                system_placements)
            self._data_writer.set_gatherer_map(gather_map)
            self._data_writer.set_monitor_map(monitor_map)
×
951

952
    def _report_partitioner(self) -> None:
        """
        Writes the partitioner report, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Partitioner report", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_partitioner_reports"):
                partitioner_report()
×
960

961
    @property
    def get_number_of_available_cores_on_machine(self) -> int:
        """
        The number of available cores on the machine after taking
        into account preallocated resources.

        :return: number of available cores
        :rtype: int
        """
        machine = self._data_writer.get_machine()
        n_ethernets = len(machine.ethernet_connected_chips)
        n_plain_chips = machine.n_chips - n_ethernets
        # Start from all user cores, then deduct the monitors reserved on
        # ordinary chips and the (larger) reservation on Ethernet chips
        available = machine.total_available_user_cores
        available -= n_plain_chips * self._data_writer.get_all_monitor_cores()
        available -= (
            n_ethernets * self._data_writer.get_ethernet_monitor_cores())
        return available
×
978

979
    def _execute_application_placer(self, system_placements: Placements):
        """
        Runs, times and logs the Application Placer.

        Sets the "placements" data.

        .. note::
            Calling of this method is based on the configuration placer value
        """
        with FecTimer("Application Placer", TimerWork.OTHER):
            placements = place_application_graph(system_placements)
            self._data_writer.set_placements(placements)
991

992
    def _do_placer(self, system_placements: Placements):
        """
        Runs, times and logs one of the placers.

        Sets the "placements" data.  Which placer runs is chosen by the
        cfg placer value; extend this method to support a new Placer.

        :raise ConfigurationException:
            if the configuration place value is unexpected
        """
        placer_name = get_config_str("Mapping", "placer")
        if placer_name == "ApplicationPlacer":
            return self._execute_application_placer(system_placements)
        if "," in placer_name:
            raise ConfigurationException(
                "Only a single algorithm is supported for placer")
        raise ConfigurationException(
            f"Unexpected cfg setting placer: {placer_name}")
1013

1014
    def _do_write_metadata(self) -> None:
        """
        Writes metadata (the vertex labels) to the SQLite buffer database,
        timed and logged.
        """
        with FecTimer("Record vertex labels to database", TimerWork.REPORT), \
                BufferDatabase() as db:
            db.store_vertex_labels()
×
1021

1022
    @final
    def _execute_system_multicast_routing_generator(self) -> None:
        """
        Runs, times and logs the SystemMulticastRoutingGenerator if required.

        May set the data "data_in_multicast_routing_tables",
        "data_in_multicast_key_to_chip_map" and
        "system_multicast_router_timeout_keys".
        """
        with FecTimer("System multicast routing generator",
                      TimerWork.OTHER) as timer:
            if timer.skip_if_cfgs_false(
                    "Machine", "enable_advanced_monitor_support",
                    "enable_reinjection"):
                return
            self._data_writer.set_system_multicast_routing_data(
                system_multicast_routing_generator())
×
1039

1040
    @final
    def _execute_fixed_route_router(self) -> None:
        """
        Runs, times and logs the FixedRouteRouter if required.

        May set the "fixed_routes" data.
        """
        with FecTimer("Fixed route router", TimerWork.OTHER) as timer:
            if not timer.skip_if_cfg_false(
                    "Machine", "enable_advanced_monitor_support"):
                self._data_writer.set_fixed_routes(fixed_route_router(
                    DataSpeedUpPacketGatherMachineVertex))
1053

1054
    def _report_placements_with_application_graph(self) -> None:
        """
        Writes the application-graph placer report, timed and logged, when
        cfg requests it; a no-op for an empty graph.
        """
        if self._data_writer.get_n_vertices() == 0:
            return
        with FecTimer("Placements with application graph report",
                      TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_application_graph_placer_report"):
                placer_reports_with_application_graph()
×
1067

1068
    def _json_placements(self) -> None:
        """
        Writes the placements as JSON, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Json placements", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false("Reports", "write_json_placements"):
                write_json_placements()
×
1076

1077
    @final
    def _execute_application_router(self) -> None:
        """
        Runs, times and logs the ApplicationRouter.

        Sets the "routing_table_by_partition" data if called.

        .. note::
            Calling of this method is based on the configuration router value
        """
        with FecTimer("Application Router", TimerWork.RUNNING):
            tables = route_application_graph()
            self._data_writer.set_routing_table_by_partition(tables)
1090

1091
    @final
    def _do_routing(self) -> None:
        """
        Runs, times and logs one of the routers.

        Sets the "routing_table_by_partition" data.  Which router runs is
        chosen by the cfg router value; extend this method to support a
        new Router.

        :raise ConfigurationException:
            if the configuration router value is unexpected
        """
        router_name = get_config_str("Mapping", "router")
        if router_name == "ApplicationRouter":
            return self._execute_application_router()
        if "," in router_name:
            raise ConfigurationException(
                "Only a single algorithm is supported for router")
        raise ConfigurationException(
            f"Unexpected cfg setting router: {router_name}")
1113

1114
    def _execute_basic_tag_allocator(self) -> None:
        """
        Runs, times and logs the Tag Allocator.

        Sets the "tag" data.
        """
        with FecTimer("Basic tag allocator", TimerWork.OTHER):
            tags = basic_tag_allocator()
            self._data_writer.set_tags(tags)
×
1122

1123
    def _report_tag_allocations(self) -> None:
        """
        Writes the tag allocator report, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Tag allocator report", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_tag_allocation_reports"):
                tag_allocator_report()
×
1132

1133
    @final
    def _execute_global_allocate(
            self, extra_allocations: Iterable[Tuple[ApplicationVertex, str]]):
        """
        Runs, times and logs the Global Zoned Routing Info Allocator.

        Sets the "routing_info" data when called.

        .. note::
            Calling of this method is based on the configuration
            info_allocator value
        """
        with FecTimer("Global allocate", TimerWork.OTHER):
            infos = global_allocate(extra_allocations)
            self._data_writer.set_routing_infos(infos)
1148

1149
    @final
    def _execute_flexible_allocate(
            self, extra_allocations: Iterable[Tuple[ApplicationVertex, str]]):
        """
        Runs, times and logs the Zoned Routing Info Allocator.

        Sets the "routing_info" data when called.

        .. note::
            Calling of this method is based on the configuration
            info_allocator value
        """
        with FecTimer("Zoned routing info allocator", TimerWork.OTHER):
            infos = flexible_allocate(extra_allocations)
            self._data_writer.set_routing_infos(infos)
1164

1165
    @final
    def _do_info_allocator(self) -> None:
        """
        Runs, times and logs one of the info allocators.

        Sets the "routing_info" data.  Which allocator runs is chosen by
        the cfg info_allocator value; extend this method to support a new
        Info Allocator.

        :raise ConfigurationException:
            if the configuration info_allocator value is unexpected
        """
        allocator_name = get_config_str("Mapping", "info_allocator")
        if allocator_name == "GlobalZonedRoutingInfoAllocator":
            return self._execute_global_allocate([])
        if allocator_name == "ZonedRoutingInfoAllocator":
            return self._execute_flexible_allocate([])
        if "," in allocator_name:
            raise ConfigurationException(
                "Only a single algorithm is supported for info_allocator")
        raise ConfigurationException(
            f"Unexpected cfg setting info_allocator: {allocator_name}")
1194

1195
    def _report_router_info(self) -> None:
        """
        Writes the router info report, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Router info report", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_router_info_report"):
                routing_info_report([])
×
1203

1204
    @final
    def _execute_basic_routing_table_generator(self) -> None:
        """
        Runs, times and logs the basic Routing Table Generator, setting
        the uncompressed tables.

        .. note::
            Currently no other Routing Table Generator supported.
            To add an additional Generator copy the pattern of do_placer
        """
        with FecTimer("Basic routing table generator", TimerWork.OTHER):
            tables = basic_routing_table_generator()
            self._data_writer.set_uncompressed(tables)
×
1215

1216
    @final
    def _execute_merged_routing_table_generator(self) -> None:
        """
        Runs, times and logs the merged Routing Table Generator, setting
        the uncompressed tables.

        .. note::
            Currently no other Routing Table Generator supported.
            To add an additional Generator copy the pattern of do_placer
        """
        with FecTimer("Merged routing table generator", TimerWork.OTHER):
            tables = merged_routing_table_generator()
            self._data_writer.set_uncompressed(tables)

        # TODO Nuke ZonedRoutingTableGenerator
1230

1231
    @final
    def _do_routing_table_generator(self) -> None:
        """
        Runs, times and logs one of the routing table generators.

        Sets the uncompressed routing tables.  Which generator runs is
        chosen by the cfg `routing_table_generator` value; extend this
        method to support a new generator.

        :raise ConfigurationException:
            if the configuration's `routing_table_generator` value is
            unexpected
        """
        generator_name = get_config_str("Mapping", "routing_table_generator")
        if generator_name == "BasicRoutingTableGenerator":
            return self._execute_basic_routing_table_generator()
        if generator_name == "MergedRoutingTableGenerator":
            return self._execute_merged_routing_table_generator()
        if "," in generator_name:
            raise ConfigurationException(
                "Only a single algorithm is supported for"
                " routing_table_generator")
        raise ConfigurationException(
            f"Unexpected cfg setting routing_table_generator: "
            f"{generator_name}")
1259

1260
    def _report_routers(self) -> None:
        """
        Writes, times and logs the router report if requested.
        """
        with FecTimer("Router report", TimerWork.REPORT) as timer:
            if timer.skip_if_cfg_false("Reports", "write_router_reports"):
                return
            # Fix: generate the report inside the timer block; it was
            # previously dedented outside, so it ran untimed -
            # inconsistent with _report_router_summary and the other
            # _report_* methods.
            router_report_from_paths()
×
1268

1269
    def _report_router_summary(self) -> None:
        """
        Writes the router summary report, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Router summary report", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_router_summary_report"):
                router_summary_report()
×
1278

1279
    def _json_routing_tables(self) -> None:
        """
        Writes the routing tables as JSON, timed and logged, when cfg
        requests it.
        """
        with FecTimer("Json routing tables", TimerWork.REPORT) as timer:
            if not timer.skip_if_cfg_false(
                    "Reports", "write_json_routing_tables"):
                # Return value deliberately ignored as it is never used
                write_json_routing_tables(
                    self._data_writer.get_uncompressed())
1288

1289
    def _report_drift(self, start: bool) -> None:
        """
        Writes the inter-board timer drift report, timed and logged, when
        cfg requests it; never runs on a virtual board.

        :param bool start: Is this the start or the end
        """
        with FecTimer("Drift report", TimerWork.REPORT) as timer:
            if timer.skip_if_virtual_board():
                return
            # The start and end of a run are gated by separate cfg flags
            option = ("write_drift_report_start" if start
                      else "write_drift_report_end")
            if timer.skip_if_cfg_false("Reports", option):
                return
            drift_report()
×
1305

1306
    @final
    def _execute_locate_executable_start_type(self) -> None:
        """
        Works out the executable start types, timed and logged.

        May set the executable_types data.
        """
        with FecTimer("Locate executable start type", TimerWork.OTHER):
            start_types = locate_executable_start_type()
            self._data_writer.set_executable_types(start_types)
1316

1317
    @final
    def _execute_buffer_manager_creator(self) -> None:
        """
        Creates the buffer manager, timed and logged, unless one already
        exists or a virtual board is in use.

        May set the buffer_manager data.
        """
        if self._data_writer.has_buffer_manager():
            return
        with FecTimer("Buffer manager creator", TimerWork.OTHER) as timer:
            if not timer.skip_if_virtual_board():
                self._data_writer.set_buffer_manager(BufferManager())
×
1331

1332
    def _execute_sdram_outgoing_partition_allocator(self) -> None:
        """
        Allocates the SDRAM outgoing partitions, timed and logged.
        """
        timer = FecTimer(
            "SDRAM outgoing partition allocator", TimerWork.OTHER)
        with timer:
            sdram_outgoing_partition_allocator()
×
1338

1339
    def _execute_control_sync(self, do_sync: bool) -> None:
        """
        Turns board synchronisation on or off, timed and logged; a no-op
        on a virtual board.

        :param bool do_sync: Whether to enable synchronisation
        """
        with FecTimer("Control Sync", TimerWork.CONTROL) as timer:
            if not timer.skip_if_virtual_board():
                self._data_writer.get_transceiver().control_sync(do_sync)
×
1349

1350
    def _do_mapping(self, total_run_time: Optional[float]) -> None:
        """
        Runs, times and logs all the algorithms in the mapping stage.

        The steps are strictly ordered: each one reads data set by the
        ones before it via the data writer.

        :param total_run_time: the total run time to request from an
            allocator, if one is needed
        :type total_run_time: float or None
        """
        FecTimer.start_category(TimerCategory.MAPPING)

        # Preparation and early reporting
        self._setup_java_caller()
        self._do_extra_mapping_algorithms()
        self._report_network_specification()

        # Splitter preparation (reset must come before selection)
        self._execute_splitter_reset()
        self._execute_splitter_selector()
        self._execute_delay_support_adder()

        # Partition, then obtain a machine big enough for the result
        self._execute_splitter_partitioner()
        allocator_data = self._execute_allocator(total_run_time)
        self._execute_machine_generator(allocator_data)
        self._json_machine()
        self._report_board_chip()

        # System vertices are pre-placed before the main placer runs
        system_placements = Placements()
        self._add_commands_to_command_sender(system_placements)
        self._execute_split_lpg_vertices(system_placements)
        self._execute_insert_chip_power_monitors(system_placements)
        self._execute_insert_extra_monitor_vertices(system_placements)

        # Placement
        self._report_partitioner()
        self._do_placer(system_placements)
        self._report_placements_with_application_graph()
        self._json_placements()

        # Routing (system routes first, then fixed, then application)
        self._execute_system_multicast_routing_generator()
        self._execute_fixed_route_router()
        self._do_routing()

        # Tags
        self._execute_basic_tag_allocator()
        self._report_tag_allocations()

        # Routing keys and tables, plus their reports
        self._do_info_allocator()
        self._report_router_info()
        self._do_routing_table_generator()
        self._report_uncompressed_routing_table()
        self._report_routers()
        self._report_router_summary()
        self._json_routing_tables()
        self._execute_locate_executable_start_type()
        self._execute_buffer_manager_creator()

        FecTimer.end_category(TimerCategory.MAPPING)
×
1401

1402
    # Overridden by spy which adds placement_order
1403
    def _execute_graph_data_specification_writer(self) -> None:
1✔
1404
        """
1405
        Runs, times, and logs the GraphDataSpecificationWriter.
1406

1407
        Creates and fills the data spec database
1408
        """
1409
        with FecTimer("Graph data specification writer", TimerWork.OTHER):
×
1410
            self._data_writer.set_ds_database_path(
×
1411
                graph_data_specification_writer())
1412

1413
    def _do_data_generation(self) -> None:
1✔
1414
        """
1415
        Runs, Times and logs the data generation.
1416
        """
1417
        self._execute_sdram_outgoing_partition_allocator()
×
1418
        self._execute_graph_data_specification_writer()
×
1419

1420
    def _execute_routing_setup(self) -> None:
1✔
1421
        """
1422
        Runs, times and logs the RoutingSetup if required.
1423
        """
1424
        if self._multicast_routes_loaded:
×
1425
            return
×
1426
        with FecTimer("Routing setup", TimerWork.LOADING) as timer:
×
1427
            if timer.skip_if_virtual_board():
×
1428
                return
×
1429
            # Only needs the x and y of chips with routing tables
1430
            routing_setup()
×
1431

1432
    def _execute_graph_binary_gatherer(self) -> None:
1✔
1433
        """
1434
        Runs, times and logs the GraphBinaryGatherer if required.
1435
        """
1436
        with FecTimer("Graph binary gatherer", TimerWork.OTHER) as timer:
×
1437
            try:
×
1438
                self._data_writer.set_executable_targets(
×
1439
                    graph_binary_gatherer())
1440
            except KeyError:
×
1441
                if get_config_bool("Machine", "virtual_board"):
×
1442
                    logger.warning(
×
1443
                        "Ignoring executable not found as using virtual")
1444
                    timer.error("executable not found and virtual board")
×
1445
                    return
×
1446
                raise
×
1447

1448
    @final
1✔
1449
    def _execute_host_bitfield_compressor(self) -> Optional[
1✔
1450
            MulticastRoutingTables]:
1451
        """
1452
        Runs, times and logs the HostBasedBitFieldRouterCompressor
1453

1454
        .. note::
1455
            Calling of this method is based on the configuration compressor or
1456
            virtual_compressor value
1457

1458
        :return: Compressed routing tables
1459
        :rtype: ~pacman.model.routing_tables.MulticastRoutingTables
1460
        """
1461
        with FecTimer("Host based bitfield router compressor",
×
1462
                      TimerWork.OTHER) as timer:
1463
            if timer.skip_if_virtual_board():
×
1464
                return None
×
1465
            self._multicast_routes_loaded = False
×
1466
            compressed = host_based_bit_field_router_compressor()
×
1467
            return compressed
×
1468

1469
    @final
1✔
1470
    def _execute_ordered_covering_compressor(self) -> MulticastRoutingTables:
1✔
1471
        """
1472
        Runs, times and logs the OrderedCoveringCompressor.
1473

1474
        .. note::
1475
            Calling of this method is based on the configuration compressor or
1476
            virtual_compressor value
1477

1478
        :return: Compressed routing tables
1479
        :rtype: ~pacman.model.routing_tables.MulticastRoutingTables
1480
        """
1481
        with FecTimer("Ordered covering compressor", TimerWork.OTHER) as timer:
×
1482
            self._multicast_routes_loaded = False
×
1483
            precompressed = self._data_writer.get_precompressed()
×
1484
            if self._compression_skipable(precompressed):
×
1485
                timer.skip("Tables already small enough")
×
1486
                return precompressed
×
1487
            return ordered_covering_compressor()
×
1488

1489
    @final
1✔
1490
    def _execute_ordered_covering_compression(self) -> Optional[
1✔
1491
            MulticastRoutingTables]:
1492
        """
1493
        Runs, times and logs the ordered covering compressor on machine.
1494

1495
        .. note::
1496
            Calling of this method is based on the configuration compressor or
1497
            virtual_compressor value
1498
        """
1499
        with FecTimer("Ordered covering compressor",
×
1500
                      TimerWork.COMPRESSING) as timer:
1501
            if timer.skip_if_virtual_board():
×
1502
                return None
×
1503
            precompressed = self._data_writer.get_precompressed()
×
1504
            if self._compression_skipable(precompressed):
×
1505
                timer.skip("Tables already small enough")
×
1506
                self._multicast_routes_loaded = False
×
1507
                return precompressed
×
1508
            ordered_covering_compression()
×
1509
            self._multicast_routes_loaded = True
×
1510
            return None
×
1511

1512
    @final
1✔
1513
    def _execute_pair_compressor(self) -> MulticastRoutingTables:
1✔
1514
        """
1515
        Runs, times and logs the PairCompressor.
1516

1517
        .. note::
1518
            Calling of this method is based on the configuration compressor or
1519
            virtual_compressor value
1520

1521
        :return: Compressed routing table
1522
        :rtype: ~pacman.model.routing_tables.MulticastRoutingTables
1523
        """
1524
        with FecTimer("Pair compressor", TimerWork.OTHER) as timer:
×
1525
            precompressed = self._data_writer.get_precompressed()
×
1526
            self._multicast_routes_loaded = False
×
1527
            if self._compression_skipable(precompressed):
×
1528
                timer.skip("Tables already small enough")
×
1529
                return precompressed
×
1530
            return pair_compressor()
×
1531

1532
    @final
1✔
1533
    def _execute_pair_compression(self) -> Optional[MulticastRoutingTables]:
1✔
1534
        """
1535
        Runs, times and logs the pair compressor on machine.
1536

1537
        .. note::
1538
            Calling of this method is based on the configuration compressor or
1539
            virtual_compressor value
1540
        """
1541
        with FecTimer("Pair on chip router compression",
×
1542
                      TimerWork.COMPRESSING) as timer:
1543
            if timer.skip_if_virtual_board():
×
1544
                return None
×
1545
            precompressed = self._data_writer.get_precompressed()
×
1546
            if self._compression_skipable(precompressed):
×
1547
                timer.skip("Tables already small enough")
×
1548
                self._multicast_routes_loaded = False
×
1549
                return precompressed
×
1550
            pair_compression()
×
1551
            self._multicast_routes_loaded = True
×
1552
            return None
×
1553

1554
    @final
1✔
1555
    def _execute_pair_unordered_compressor(self) -> MulticastRoutingTables:
1✔
1556
        """
1557
        Runs, times and logs the CheckedUnorderedPairCompressor.
1558

1559
        .. note::
1560
            Calling of this method is based on the configuration compressor or
1561
            virtual_compressor value
1562

1563
        :return: compressed routing tables
1564
        :rtype: ~pacman.model.routing_tables.MulticastRoutingTables
1565
        """
1566
        with FecTimer("Pair unordered compressor", TimerWork.OTHER) as timer:
×
1567
            self._multicast_routes_loaded = False
×
1568
            precompressed = self._data_writer.get_precompressed()
×
1569
            if self._compression_skipable(precompressed):
×
1570
                timer.skip("Tables already small enough")
×
1571
                return precompressed
×
1572
            return pair_compressor(ordered=False)
×
1573

1574
    def _compressor_name(self) -> Tuple[str, bool]:
1✔
1575
        if get_config_bool("Machine", "virtual_board"):
×
1576
            name = get_config_str_or_none("Mapping", "virtual_compressor")
×
1577
            if name is None:
×
1578
                logger.info("As no virtual_compressor specified "
×
1579
                            "using compressor setting")
1580
                name = get_config_str("Mapping", "compressor")
×
1581
        else:
1582
            name = get_config_str("Mapping", "compressor")
×
1583
        pre_compress = "BitField" not in name
×
1584
        return name, pre_compress
×
1585

1586
    def _compression_skipable(self, tables) -> bool:
1✔
1587
        if get_config_bool(
×
1588
                "Mapping", "router_table_compress_as_far_as_possible"):
1589
            return False
×
1590
        machine = self._data_writer.get_machine()
×
1591
        return (tables.get_max_number_of_entries()
×
1592
                <= machine.min_n_router_enteries)
1593

1594
    def _execute_pre_compression(self, pre_compress: bool):
1✔
1595
        name = get_config_str_or_none("Mapping", "precompressor")
×
1596
        if not pre_compress or name is None:
×
1597
            # Declare the precompressed data to be the uncompressed data
1598
            self._data_writer.set_precompressed(
×
1599
                self._data_writer.get_uncompressed())
1600
            return
×
1601
        elif name != "Ranged":
×
1602
            raise ConfigurationException(
×
1603
                f"Unexpected cfg setting precompressor: {name}")
1604

1605
        with FecTimer("Ranged Compressor", TimerWork.OTHER) as timer:
×
1606
            if self._compression_skipable(
×
1607
                    self._data_writer.get_uncompressed()):
1608
                timer.skip("Tables already small enough")
×
1609
                self._data_writer.set_precompressed(
×
1610
                    self._data_writer.get_uncompressed())
1611
                return
×
1612
            self._data_writer.set_precompressed(range_compressor())
×
1613

1614
    def _do_early_compression(self, name: str) -> Optional[
1✔
1615
            MulticastRoutingTables]:
1616
        """
1617
        Calls a compressor based on the name provided.
1618

1619
        .. note::
1620
            This method is the entry point for adding a new compressor that
1621
             can or must run early.
1622

1623
        :param str name: Name of a compressor
1624
        :return: CompressedRoutingTables (likely to be `None)`,
1625
            RouterCompressorProvenanceItems (may be an empty list)
1626
        :rtype: tuple(~pacman.model.routing_tables.MulticastRoutingTables or
1627
            None, list(ProvenanceDataItem))
1628
        :raise ConfigurationException: if the name is not expected
1629
        """
1630
        if name == "OrderedCoveringCompressor":
×
1631
            return self._execute_ordered_covering_compressor()
×
1632
        elif name == "OrderedCoveringOnChipRouterCompression":
×
1633
            return self._execute_ordered_covering_compression()
×
1634
        elif name == "PairCompressor":
×
1635
            return self._execute_pair_compressor()
×
1636
        elif name == "PairOnChipRouterCompression":
×
1637
            return self._execute_pair_compression()
×
1638
        elif name == "PairUnorderedCompressor":
×
1639
            return self._execute_pair_unordered_compressor()
×
1640

1641
        # delay compression until later
1642
        return None
×
1643

1644
    def _do_delayed_compression(
1✔
1645
            self, name: str,
1646
            compressed: Optional[MulticastRoutingTables]) -> Optional[
1647
                MulticastRoutingTables]:
1648
        """
1649
        Run compression that must be delayed until later.
1650

1651
        .. note::
1652
            This method is the entry point for adding a new compressor that
1653
            can not run at the normal place
1654

1655
        :param str name: Name of a compressor
1656
        :return: CompressedRoutingTables (likely to be `None`),
1657
            RouterCompressorProvenanceItems (may be an empty list)
1658
        :rtype: ~pacman.model.routing_tables.MulticastRoutingTables or None
1659
        :raise ConfigurationException: if the name is not expected
1660
        """
1661
        if self._multicast_routes_loaded or compressed:
×
1662
            # Already compressed
1663
            return compressed
×
1664
        # overridden in spy to handle:
1665
        # SpynnakerMachineBitFieldOrderedCoveringCompressor
1666
        # SpynnakerMachineBitFieldPairRouterCompressor
1667

1668
        if name == "HostBasedBitFieldRouterCompressor":
×
1669
            return self._execute_host_bitfield_compressor()
×
1670
        if "," in name:
×
1671
            raise ConfigurationException(
×
1672
                "Only a single algorithm is supported for compressor")
1673

1674
        raise ConfigurationException(
×
1675
            f"Unexpected cfg setting compressor: {name}")
1676

1677
    @final
1✔
1678
    def _execute_load_routing_tables(
1✔
1679
            self, compressed: Optional[MulticastRoutingTables]) -> None:
1680
        """
1681
        Runs, times and logs the RoutingTableLoader if required.
1682

1683
        :param compressed:
1684
        :type compressed: ~.MulticastRoutingTables or None
1685
        """
1686
        if not compressed:
×
1687
            return
×
1688
        with FecTimer("Routing table loader", TimerWork.LOADING) as timer:
×
1689
            self._multicast_routes_loaded = True
×
1690
            if timer.skip_if_virtual_board():
×
1691
                return
×
1692
            routing_table_loader(compressed)
×
1693

1694
    def _report_uncompressed_routing_table(self) -> None:
1✔
1695
        """
1696
        Runs, times and logs the router report from router tables if requested.
1697
        """
1698
        with FecTimer("Uncompressed routing table report",
×
1699
                      TimerWork.REPORT) as timer:
1700
            if timer.skip_if_cfg_false(
×
1701
                    "Reports", "write_uncompressed"):
1702
                return
×
1703
            router_report_from_router_tables()
×
1704

1705
    def _report_bit_field_compressor(self) -> None:
1✔
1706
        """
1707
        Runs, times and logs the BitFieldCompressorReport if requested.
1708
        """
1709
        with FecTimer("Bitfield compressor report", TimerWork.REPORT) as timer:
×
1710
            if timer.skip_if_cfg_false(
×
1711
                    "Reports",  "write_bit_field_compressor_report"):
1712
                return
×
1713
            # BitFieldSummary output ignored as never used
1714
            bitfield_compressor_report()
×
1715

1716
    def _execute_load_fixed_routes(self) -> None:
1✔
1717
        """
1718
        Runs, times and logs Load Fixed Routes if required.
1719
        """
1720
        with FecTimer("Load fixed routes", TimerWork.LOADING) as timer:
×
1721
            if timer.skip_if_cfg_false(
×
1722
                    "Machine", "enable_advanced_monitor_support"):
1723
                return
×
1724
            if timer.skip_if_virtual_board():
×
1725
                return
×
1726
            load_fixed_routes()
×
1727

1728
    def _execute_load_system_data_specification(self) -> None:
1✔
1729
        """
1730
        Runs, times and logs the load_system_data_specs if required.
1731
        """
1732
        with FecTimer(
×
1733
                "Load system data specification", TimerWork.OTHER) as timer:
1734
            if timer.skip_if_virtual_board():
×
1735
                return
×
1736
            load_system_data_specs()
×
1737

1738
    def _execute_load_system_executable_images(self) -> None:
1✔
1739
        """
1740
        Runs, times and logs the loading of executable images.
1741
        """
1742
        with FecTimer(
×
1743
                "Load executable system Images", TimerWork.LOADING) as timer:
1744
            if timer.skip_if_virtual_board():
×
1745
                return
×
1746
            load_sys_images()
×
1747

1748
    def _execute_load_application_data_specification(self) -> None:
1✔
1749
        """
1750
        Runs, times and logs :py:meth:`load_application_data_specs`
1751
        if required.
1752

1753
        :return: map of placement and DSG data, and loaded data flag.
1754
        :rtype: dict(tuple(int,int,int),DataWritten) or DsWriteInfo
1755
        """
1756
        with FecTimer("Load Application data specification",
×
1757
                      TimerWork.LOADING) as timer:
1758
            if timer.skip_if_virtual_board():
×
1759
                return
×
1760
            return load_application_data_specs()
×
1761

1762
    def _execute_tags_from_machine_report(self) -> None:
1✔
1763
        """
1764
        Run, times and logs the TagsFromMachineReport if requested.
1765
        """
1766
        with FecTimer(
×
1767
                "Tags from machine report", TimerWork.EXTRACTING) as timer:
1768
            if timer.skip_if_virtual_board():
×
1769
                return
×
1770
            if timer.skip_if_cfg_false(
×
1771
                    "Reports", "write_tag_allocation_reports"):
1772
                return
×
1773
            tags_from_machine_report()
×
1774

1775
    def _execute_load_tags(self) -> None:
1✔
1776
        """
1777
        Runs, times and logs the Tags Loader if required.
1778
        """
1779
        # TODO why: if graph_changed or data_changed:
1780
        with FecTimer("Tags Loader", TimerWork.LOADING) as timer:
×
1781
            if timer.skip_if_virtual_board():
×
1782
                return
×
1783
            tags_loader()
×
1784

1785
    def _do_extra_load_algorithms(self) -> None:
1✔
1786
        """
1787
        Runs, times and logs any extra load algorithms.
1788
        """
1789

1790
    def _report_memory_on_host(self) -> None:
1✔
1791
        """
1792
        Runs, times and logs MemoryMapOnHostReport if requested.
1793
        """
1794
        with FecTimer("Memory report", TimerWork.REPORT) as timer:
×
1795
            if timer.skip_if_virtual_board():
×
1796
                return
×
1797
            if timer.skip_if_cfg_false(
×
1798
                    "Reports", "write_memory_map_report"):
1799
                return
×
1800
            memory_map_on_host_report()
×
1801

1802
    def _report_memory_on_chip(self) -> None:
1✔
1803
        """
1804
        Runs, times and logs MemoryMapOnHostChipReport if requested.
1805
        """
1806
        with FecTimer("Memory report", TimerWork.REPORT) as timer:
×
1807
            if timer.skip_if_virtual_board():
×
1808
                return
×
1809
            if timer.skip_if_cfg_false(
×
1810
                    "Reports", "write_memory_map_report"):
1811
                return
×
1812
            memory_map_on_host_chip_report()
×
1813

1814
    # TODO consider different cfg flags
1815
    def _report_compressed(self, compressed: Optional[
1✔
1816
            MulticastRoutingTables]) -> None:
1817
        """
1818
        Runs, times and logs the compressor reports if requested.
1819

1820
        :param compressed:
1821
        :type compressed: ~.MulticastRoutingTables or None
1822
        """
1823
        with FecTimer("Compressor report", TimerWork.REPORT) as timer:
×
NEW
1824
            if timer.skip_all_cfgs_false(
×
1825
                    [("Reports", "write_compressed"),
1826
                     ("Reports", "write_compression_comparison"),
1827
                     ("Reports", "write_compression_summary"),
1828
                     ("Mapping", "run_compression_checker")],
1829
                    "No reports need compressed routing tables"):
UNCOV
1830
                return
×
1831

1832
            if compressed is None:
×
1833
                if timer.skip_if_virtual_board():
×
1834
                    return
×
1835
                compressed = read_routing_tables_from_machine()
×
1836

NEW
1837
            if get_config_bool("Reports", "write_compressed"):
×
NEW
1838
                router_report_from_compressed_router_tables(compressed)
×
NEW
1839
            if get_config_bool("Reports", "write_compression_comparison"):
×
NEW
1840
                generate_comparison_router_report(compressed)
×
NEW
1841
            if get_config_bool("Reports", "write_compression_summary"):
×
NEW
1842
                router_compressed_summary_report(compressed)
×
NEW
1843
            if get_config_bool("Mapping", "run_compression_checker"):
×
NEW
1844
                routing_tables = self._data_writer.get_uncompressed()
×
NEW
1845
                generate_routing_compression_checker_report(
×
1846
                    routing_tables, compressed)
1847

1848
    def _report_fixed_routes(self) -> None:
1✔
1849
        """
1850
        Runs, times and logs the FixedRouteFromMachineReport if requested.
1851
        """
1852
        with FecTimer("Fixed route report", TimerWork.REPORT) as timer:
×
1853
            if timer.skip_if_virtual_board():
×
1854
                return
×
1855
            if timer.skip_if_cfg_false(
×
1856
                    "Machine", "enable_advanced_monitor_support"):
1857
                return
×
1858
            # TODO at the same time as LoadFixedRoutes?
1859
            fixed_route_from_machine_report()
×
1860

1861
    def _execute_application_load_executables(self) -> None:
1✔
1862
        """
1863
        Algorithms needed for loading the binaries to the SpiNNaker machine.
1864
        """
1865
        with FecTimer("Load executable app images",
×
1866
                      TimerWork.LOADING) as timer:
1867
            if timer.skip_if_virtual_board():
×
1868
                return
×
1869
            load_app_images()
×
1870

1871
    def _do_load(self) -> None:
1✔
1872
        """
1873
        Runs, times and logs the load algorithms.
1874
        """
1875
        FecTimer.start_category(TimerCategory.LOADING)
×
1876

1877
        if self._data_writer.get_requires_mapping():
×
1878
            self._execute_routing_setup()
×
1879
            self._execute_graph_binary_gatherer()
×
1880
        # loading_algorithms
1881
        compressor, pre_compress = self._compressor_name()
×
1882
        self._execute_pre_compression(pre_compress)
×
1883
        compressed = self._do_early_compression(compressor)
×
1884

1885
        self._do_data_generation()
×
1886

1887
        self._execute_control_sync(False)
×
1888
        if self._data_writer.get_requires_mapping():
×
1889
            self._execute_load_fixed_routes()
×
1890
        self._execute_load_system_data_specification()
×
1891
        self._execute_load_system_executable_images()
×
1892
        self._execute_load_tags()
×
1893
        self._execute_load_application_data_specification()
×
1894

1895
        self._do_extra_load_algorithms()
×
1896
        compressed = self._do_delayed_compression(compressor, compressed)
×
1897
        self._execute_load_routing_tables(compressed)
×
1898
        self._report_bit_field_compressor()
×
1899

1900
        # TODO Was master correct to run the report first?
1901
        self._execute_tags_from_machine_report()
×
1902
        if self._data_writer.get_requires_mapping():
×
1903
            self._report_memory_on_host()
×
1904
            self._report_memory_on_chip()
×
1905
            self._report_compressed(compressed)
×
1906
            self._report_fixed_routes()
×
1907
        self._execute_application_load_executables()
×
1908

1909
        FecTimer.end_category(TimerCategory.LOADING)
×
1910

1911
    def _report_sdram_usage_per_chip(self) -> None:
1✔
1912
        # TODO why in do run
1913
        with FecTimer("Sdram usage per chip report",
×
1914
                      TimerWork.REPORT) as timer:
1915
            if timer.skip_if_cfg_false(
×
1916
                    "Reports", "write_sdram_usage_report_per_chip"):
1917
                return
×
1918
            sdram_usage_report_per_chip()
×
1919

1920
    def _execute_dsg_region_reloader(self) -> None:
1✔
1921
        """
1922
        Runs, times and logs the DSGRegionReloader if required.
1923

1924
        Reload any parameters over the loaded data if we have already
1925
        run and not using a virtual board and the data hasn't already
1926
        been regenerated
1927
        """
1928
        if not self._data_writer.is_ran_ever():
×
1929
            return
×
1930
        if self._data_writer.is_hard_reset():
×
1931
            return
×
1932
        with FecTimer("DSG region reloader", TimerWork.LOADING) as timer:
×
1933
            if timer.skip_if_virtual_board():
×
1934
                return
×
1935
            reload_dsg_regions()
×
1936

1937
    def _execute_graph_provenance_gatherer(self) -> None:
1✔
1938
        """
1939
        Runs, times and log the GraphProvenanceGatherer if requested.
1940
        """
1941
        with FecTimer("Graph provenance gatherer", TimerWork.OTHER) as timer:
×
1942
            if timer.skip_if_cfg_false("Reports",
×
1943
                                       "read_graph_provenance_data"):
1944
                return
×
1945
            graph_provenance_gatherer()
×
1946

1947
    def _execute_placements_provenance_gatherer(self) -> None:
1✔
1948
        """
1949
        Runs, times and log the PlacementsProvenanceGatherer if requested.
1950
        """
1951
        with FecTimer(
×
1952
                "Placements provenance gatherer", TimerWork.OTHER) as timer:
1953
            if timer.skip_if_cfg_false("Reports",
×
1954
                                       "read_placements_provenance_data"):
1955
                return
×
1956
            if timer.skip_if_virtual_board():
×
1957
                return
×
1958
            # Also used in recover from error where is is not all placements
1959
            placements_provenance_gatherer(
×
1960
                self._data_writer.get_n_placements(),
1961
                self._data_writer.iterate_placemements())
1962

1963
    def _execute_router_provenance_gatherer(self) -> None:
1✔
1964
        """
1965
        Runs, times and log the RouterProvenanceGatherer if requested.
1966
        """
1967
        with FecTimer(
×
1968
                "Router provenance gatherer", TimerWork.EXTRACTING) as timer:
1969
            if timer.skip_if_cfg_false("Reports",
×
1970
                                       "read_router_provenance_data"):
1971
                return
×
1972
            if timer.skip_if_virtual_board():
×
1973
                return
×
1974
            router_provenance_gatherer()
×
1975

1976
    def _execute_profile_data_gatherer(self) -> None:
1✔
1977
        """
1978
        Runs, times and logs the ProfileDataGatherer if requested.
1979
        """
1980
        with FecTimer("Profile data gatherer", TimerWork.EXTRACTING) as timer:
×
1981
            if timer.skip_if_cfg_false("Reports", "read_profile_data"):
×
1982
                return
×
1983
            if timer.skip_if_virtual_board():
×
1984
                return
×
1985
            profile_data_gatherer()
×
1986

1987
    def _do_read_provenance(self) -> None:
1✔
1988
        """
1989
        Runs, times and log the methods that gather provenance.
1990

1991
        :rtype: list(ProvenanceDataItem)
1992
        """
1993
        self._execute_graph_provenance_gatherer()
×
1994
        self._execute_placements_provenance_gatherer()
×
1995
        self._execute_router_provenance_gatherer()
×
1996
        self._execute_profile_data_gatherer()
×
1997

1998
    def _report_energy(self) -> None:
1✔
1999
        """
2000
        Runs, times and logs the energy report if requested.
2001
        """
2002
        with FecTimer("Energy report", TimerWork.REPORT) as timer:
×
2003
            if timer.skip_if_cfg_false("Reports", "write_energy_report"):
×
2004
                return
×
2005
            if timer.skip_if_virtual_board():
×
2006
                return
×
2007

2008
            # TODO runtime is None
2009
            power_used = compute_energy_used()
×
2010

2011
            energy_provenance_reporter(power_used)
×
2012

2013
            # create energy reporter
2014
            energy_reporter = EnergyReport()
×
2015
            # run energy report
2016
            energy_reporter.write_energy_report(power_used)
×
2017

2018
    def _do_provenance_reports(self) -> None:
1✔
2019
        """
2020
        Runs any reports based on provenance.
2021
        """
2022

2023
    def _execute_clear_io_buf(self) -> None:
1✔
2024
        """
2025
        Runs, times and logs the ChipIOBufClearer if required.
2026
        """
2027
        if self._data_writer.get_current_run_timesteps() is None:
×
2028
            return
×
2029
        with FecTimer("Clear IO buffer", TimerWork.CONTROL) as timer:
×
2030
            if timer.skip_if_virtual_board():
×
2031
                return
×
2032
            # TODO Why check empty_graph is always false??
2033
            if timer.skip_if_cfg_false("Reports", "clear_iobuf_during_run"):
×
2034
                return
×
2035
            chip_io_buf_clearer()
×
2036

2037
    def _execute_runtime_update(self, n_sync_steps: int) -> None:
1✔
2038
        """
2039
        Runs, times and logs the runtime updater if required.
2040

2041
        :param int n_sync_steps:
2042
            The number of timesteps between synchronisations
2043
        """
2044
        with FecTimer("Runtime Update", TimerWork.LOADING) as timer:
×
2045
            if timer.skip_if_virtual_board():
×
2046
                return
×
2047
            if (ExecutableType.USES_SIMULATION_INTERFACE in
×
2048
                    self._data_writer.get_executable_types()):
2049
                chip_runtime_updater(n_sync_steps)
×
2050
            else:
2051
                timer.skip("No Simulation Interface used")
×
2052

2053
    def _execute_create_database_interface(
1✔
2054
            self, run_time: Optional[float]) -> None:
2055
        """
2056
        Runs, times and logs Database Interface Creator.
2057

2058
        Sets the _database_file_path data object
2059

2060
        :param int run_time: the run duration in milliseconds.
2061
        """
2062
        with FecTimer("Create database interface", TimerWork.OTHER):
×
2063
            # Used to used compressed routing tables if available on host
2064
            # TODO consider not saving router tables.
2065
            self._data_writer.set_database_file_path(
×
2066
                database_interface(run_time))
2067

2068
    def _execute_create_notifiaction_protocol(self) -> None:
1✔
2069
        """
2070
        Runs, times and logs the creation of the Notification Protocol.
2071

2072
        Sets the notification_interface data object
2073
        """
2074
        with FecTimer("Create notification protocol", TimerWork.OTHER):
×
2075
            self._data_writer.set_notification_protocol(
×
2076
                create_notification_protocol())
2077

2078
    def _execute_runner(
1✔
2079
            self, n_sync_steps: int, run_time: Optional[float]) -> None:
2080
        """
2081
        Runs, times and logs the ApplicationRunner.
2082

2083
        :param int n_sync_steps:
2084
            The number of timesteps between synchronisations
2085
        :param int run_time: the run duration in milliseconds.
2086
        """
2087
        with FecTimer(FecTimer.APPLICATION_RUNNER, TimerWork.RUNNING) as timer:
×
2088
            if timer.skip_if_virtual_board():
×
2089
                return
×
2090
            # Don't timeout if a stepped mode is in operation
2091
            if n_sync_steps:
×
2092
                time_threshold = None
×
2093
            else:
2094
                time_threshold = get_config_int(
×
2095
                    "Machine", "post_simulation_overrun_before_error")
2096
            application_runner(
×
2097
                run_time, time_threshold, self._run_until_complete)
2098

2099
    def _execute_extract_iobuff(self) -> None:
        """
        Runs, times and logs the ChipIOBufExtractor if required.
        """
        with FecTimer("Extract IO buff", TimerWork.EXTRACTING) as timer:
            # Nothing to extract on a virtual board, and the user may
            # have switched IOBUF extraction off in the cfg.
            if (timer.skip_if_virtual_board()
                    or timer.skip_if_cfg_false("Reports", "extract_iobuf")):
                return
            # Error/warning messages returned are ignored as never used
            chip_io_buf_extractor()
    def _execute_buffer_extractor(self) -> None:
        """
        Runs, times and logs the BufferExtractor if required.
        """
        with FecTimer("Buffer extractor", TimerWork.EXTRACT_DATA) as timer:
            if timer.skip_if_virtual_board():
                return
            # Drain the recorded data for every placement via the
            # buffer manager.
            self._data_writer.get_buffer_manager().get_placement_data()
    def _do_extract_from_machine(self) -> None:
        """
        Runs, times and logs the steps to extract data from the machine.

        Pulls IOBUFs and buffered data off the machine, then reads and
        reports provenance and energy usage.
        """
        # Raw extraction first: IOBUFs, then recorded data, then clear
        # the on-chip IOBUFs so a later run starts clean.
        self._execute_extract_iobuff()
        self._execute_buffer_extractor()
        self._execute_clear_io_buf()

        # FinaliseTimingData never needed as just pushed self._ to inputs
        self._do_read_provenance()
        self._report_energy()
        self._do_provenance_reports()
    def __do_run(
            self, n_machine_time_steps: Optional[int],
            n_sync_steps: int) -> None:
        """
        Runs, times and logs the do run steps.

        :param n_machine_time_steps: Number of timesteps run,
            or `None` for a run-forever request
        :type n_machine_time_steps: int or None
        :param int n_sync_steps:
            The number of timesteps between synchronisations
        """
        # TODO virtual board
        FecTimer.start_category(TimerCategory.RUN_LOOP)
        # run_time stays None for an infinite (run-forever) request
        run_time = None
        if n_machine_time_steps is not None:
            run_time = (n_machine_time_steps *
                        self._data_writer.get_simulation_time_step_ms())
        self._data_writer.increment_current_run_timesteps(
            n_machine_time_steps)

        self._report_sdram_usage_per_chip()
        self._report_drift(start=True)
        # A fresh mapping needs a fresh database for the live-event system
        if self._data_writer.get_requires_mapping():
            self._execute_create_database_interface(run_time)
        self._execute_create_notifiaction_protocol()
        # On a plain re-run (no remap, no data regen) only the region
        # contents need reloading onto the machine
        if (self._data_writer.is_ran_ever() and
                not self._data_writer.get_requires_mapping() and
                not self._data_writer.get_requires_data_generation()):
            self._execute_dsg_region_reloader()
        self._execute_runtime_update(n_sync_steps)
        self._execute_runner(n_sync_steps, run_time)
        # Extraction only happens for a finite run, or when running
        # until the binaries report completion
        if n_machine_time_steps is not None or self._run_until_complete:
            self._do_extract_from_machine()
        # reset at the end of each do_run cycle
        self._report_drift(start=False)
        self._execute_control_sync(True)
        FecTimer.end_category(TimerCategory.RUN_LOOP)
    def _do_run(
            self, n_machine_time_steps: Optional[int],
            n_sync_steps: int) -> None:
        """
        Runs, times and logs the do run steps.

        :param n_machine_time_steps: Number of timesteps run
        :type n_machine_time_steps: int or None
        :param int n_sync_steps:
            The number of timesteps between synchronisations
        """
        try:
            self.__do_run(n_machine_time_steps, n_sync_steps)
        except KeyboardInterrupt:
            # User hit Ctrl-C: shut the machine down cleanly and quit.
            logger.error("User has aborted the simulation")
            self._shutdown()
            sys.exit(1)
        except Exception as run_e:
            # Salvage whatever data can still be read off the machine,
            # then let the original error propagate to the caller.
            self._recover_from_error(run_e)
            raise
    def _recover_from_error(self, exception: Exception) -> None:
        """
        Attempt the error-recovery workflow, swallowing (but logging)
        any secondary failure so the original error stays primary.

        :param Exception exception: the error that triggered recovery
        """
        try:
            self.__recover_from_error(exception)
        except Exception:  # broad by design: recovery must never re-raise
            # Lazy %-formatting (not an f-string) so the message is only
            # built when actually logged; logger.exception also appends
            # the full traceback of the secondary error.
            logger.exception(
                "Error %s when attempting to recover from error",
                sys.exc_info()[1])
    def __recover_from_error(self, exception: Exception) -> None:
        """
        Best-effort extraction of diagnostics after a failed run:
        logs the error, reads router provenance, identifies cores in an
        unexpected state, pulls provenance from cores that can still be
        read, and finally dumps IOBUFs.

        :param Exception exception: the error that triggered recovery
        """
        # if exception has an exception, print to system
        logger.error("An error has occurred during simulation")
        # Print the detail including the traceback
        logger.error(exception)

        logger.info("\n\nAttempting to extract data\n\n")

        # Extract router provenance; failure here must not stop recovery
        try:
            router_provenance_gatherer()
        except Exception:
            logger.exception("Error reading router provenance")

        # Find the cores that are not in an expected state
        unsuccessful_cores = CPUInfos()
        if isinstance(exception, SpiNNManCoresNotInStateException):
            unsuccessful_cores = exception.failed_core_states()

        # If there are no cores in a bad state, find those not yet in
        # their finished state
        transceiver = self._data_writer.get_transceiver()
        if not unsuccessful_cores:
            # NOTE(review): each iteration overwrites unsuccessful_cores,
            # so only the last executable type's cores are kept — looks
            # like these should be merged; confirm intended behaviour.
            for executable_type, core_subsets in \
                    self._data_writer.get_executable_types().items():
                unsuccessful_cores = transceiver.get_cpu_infos(
                    core_subsets, executable_type.end_state, False)

        # Print the details of error cores
        logger.error(unsuccessful_cores.get_status_string())

        # Find the cores that are not in RTE i.e. that can still be read
        non_rte_cores = unsuccessful_cores.infos_not_in_states(
            [CPUState.RUN_TIME_EXCEPTION, CPUState.WATCHDOG])

        # If there are any cores that are not in RTE, extract data from them
        if (non_rte_cores and
                ExecutableType.USES_SIMULATION_INTERFACE in
                self._data_writer.get_executable_types()):
            non_rte_core_subsets = CoreSubsets()
            for (x, y, p) in non_rte_cores:
                non_rte_core_subsets.add_processor(x, y, p)

            # Attempt to force the cores to write provenance and exit
            try:
                chip_provenance_updater(non_rte_core_subsets)
            except Exception:
                logger.exception("Could not update provenance on chip")

            # Extract any written provenance data from cores that reached
            # the FINISHED state after being prodded above
            try:
                transceiver = self._data_writer.get_transceiver()
                finished_cores = transceiver.get_cpu_infos(
                    non_rte_core_subsets, CPUState.FINISHED, True)
                finished_placements = Placements()
                for (x, y, p) in finished_cores:
                    try:
                        placement = self._data_writer.\
                            get_placement_on_processor(x, y, p)
                        finished_placements.add_placement(placement)
                    except Exception:   # pylint: disable=broad-except
                        pass  # already recovering from error
                placements_provenance_gatherer(
                    finished_placements.n_placements,
                    finished_placements.placements)
            except Exception as pro_e:
                logger.exception(f"Could not read provenance due to {pro_e}")

        # Read IOBUF where possible (that should be everywhere)
        iobuf = IOBufExtractor()
        try:
            errors, warnings = iobuf.extract_iobuf()
        except Exception:
            logger.exception("Could not get iobuf")
            errors, warnings = [], []

        # Print the IOBUFs
        self._print_iobuf(errors, warnings)
    @staticmethod
    def _print_iobuf(errors: Iterable[str], warnings: Iterable[str]):
        """
        Log extracted IOBUF messages: warnings first, then errors.

        :param list(str) errors:
        :param list(str) warnings:
        """
        for message in warnings:
            logger.warning(message)
        for message in errors:
            logger.error(message)
    def reset(self) -> None:
        """
        Puts the simulation back at time zero.

        A reset directly after another reset, or before any run, is
        ignored (with an error logged).  If the user has obtained the
        machine object, the reset must be hard (the machine is rebuilt);
        otherwise a soft reset of the data layer suffices.
        """
        FecTimer.start_category(TimerCategory.RESETTING)
        if not self._data_writer.is_ran_last():
            if not self._data_writer.is_ran_ever():
                logger.error("Ignoring the reset before the run")
            else:
                logger.error("Ignoring the repeated reset call")
            return

        logger.info("Resetting")

        if self._data_writer.get_user_accessed_machine():
            logger.warning(
                "A reset after a get machine call is always hard and "
                "therefore the previous machine is no longer valid")
            self._hard_reset()
        else:
            self._data_writer.soft_reset()

        # rewind the buffers from the buffer manager, to start at the beginning
        # of the simulation again and clear buffered out
        if self._data_writer.has_buffer_manager():
            self._data_writer.get_buffer_manager().reset()

        # Reset the graph off the machine, to set things to time 0
        self.__reset_graph_elements()
        FecTimer.end_category(TimerCategory.RESETTING)
    def __repr__(self) -> str:
        """Describe this front end, naming the machine when one is set."""
        if not self._data_writer.has_ipaddress():
            return "general front end instance no machine set"
        return (f"general front end instance for machine "
                f"{self._data_writer.get_ipaddress()}")
    def _shutdown(self) -> None:
        """
        Tear everything down: clear tags/routes off the machine, stop the
        running application, release the board allocation, close the
        notification protocol, stop timing and shut down the data layer.
        """
        # if stopping on machine, clear IP tags and routing table
        self.__clear()

        # stop the transceiver and allocation controller
        if self._data_writer.has_transceiver():
            transceiver = self._data_writer.get_transceiver()
            transceiver.stop_application(self._data_writer.get_app_id())

        self.__close_allocation_controller()
        self._data_writer.clear_notification_protocol()
        # Stop timing before the writer is shut down so the timing data
        # is still recordable
        FecTimer.stop_category_timing()
        self._data_writer.shut_down()
    def __clear(self) -> None:
        """
        Remove this run's IP tags and multicast routing entries from the
        machine, to the extent the cfg flags allow.  A no-op without a
        transceiver.
        """
        if not self._data_writer.has_transceiver():
            return
        txrx = self._data_writer.get_transceiver()

        if get_config_bool("Machine", "clear_tags"):
            for tag in self._data_writer.get_tags().ip_tags:
                txrx.clear_ip_tag(tag.tag, board_address=tag.board_address)
            for tag in self._data_writer.get_tags().reverse_ip_tags:
                txrx.clear_ip_tag(tag.tag, board_address=tag.board_address)

        # if clearing routing table entries, clear
        if get_config_bool("Machine", "clear_routing_tables"):
            for table in self._data_writer.get_uncompressed():
                txrx.clear_multicast_routes(table.x, table.y)
    def __close_allocation_controller(self) -> None:
        """Close the machine-allocation controller, if one is active."""
        if not FecDataView.has_allocation_controller():
            return
        FecDataView.get_allocation_controller().close()
        self._data_writer.set_allocation_controller(None)
    def stop(self) -> None:
        """
        End running of the simulation.

        If an infinite (non-virtual) run is in progress, the binaries
        are stopped and final data extracted; otherwise provenance may
        still be read if configured.  The machine is always shut down,
        even if the stop workflow itself fails.
        """
        self._data_writer.stopping()
        FecTimer.start_category(TimerCategory.SHUTTING_DOWN)
        # If we have run forever, stop the binaries

        try:
            if (self._data_writer.is_ran_ever()
                    and self._data_writer.get_current_run_timesteps() is None
                    and not get_config_bool("Machine", "virtual_board")
                    and not self._run_until_complete):
                self._do_stop_workflow()
            elif get_config_bool("Reports", "read_provenance_data_on_end"):
                self._do_read_provenance()

        except Exception as e:
            # Salvage diagnostics, record the failure, then re-raise
            self._recover_from_error(e)
            self.write_errored_file()
            raise
        finally:
            # shut down the machine properly
            self._shutdown()

        self.write_finished_file()
        # No matching FecTimer.end_category as shutdown stops timer
    def _execute_application_finisher(self) -> None:
        """
        Runs, times and logs the application finisher, which tells the
        running binaries to finish cleanly.
        """
        with FecTimer("Application finisher", TimerWork.CONTROL):
            application_finisher()
    def _do_stop_workflow(self) -> None:
        """
        Stop the running binaries and then extract their final data,
        provenance and energy reports from the machine.
        """
        self._execute_application_finisher()
        self._do_extract_from_machine()
    def stop_run(self) -> None:
        """
        Request that the current infinite run stop.

        .. note::
            This will need to be called from another thread as the infinite
            run call is blocking.

        :raises SpiNNUtilsException:
            If the stop_run was not expected in the current state.
        """
        # Do not do start category here
        # as called from a different thread while running
        if self._data_writer.is_stop_already_requested():
            logger.warning("Second Request to stop_run ignored")
            return
        # Take the condition lock so the waiting run thread is woken
        # exactly once the stop request has been recorded
        with self._state_condition:
            self._data_writer.request_stop()
            self._state_condition.notify_all()
    def continue_simulation(self) -> None:
        """
        Continue a simulation that has been started in stepped mode.
        """
        # Release the cores waiting at the next synchronisation barrier
        # by sending the appropriate sync signal to the application.
        signal_to_send = self._data_writer.get_next_sync_signal()
        txrx = self._data_writer.get_transceiver()
        txrx.send_signal(self._data_writer.get_app_id(), signal_to_send)
    @staticmethod
    def __reset_object(obj) -> None:
        """
        Reset *obj* back to the first timestep, if it supports resetting.
        Objects that do not implement AbstractCanReset are left alone.
        """
        # Reset an object if appropriate
        if isinstance(obj, AbstractCanReset):
            obj.reset_to_first_timestep()
    def __reset_graph_elements(self) -> None:
        """
        Ask every vertex and every edge in the graph that supports
        resetting to go back to the first timestep.
        """
        for vertex_obj in self._data_writer.iterate_vertices():
            self.__reset_object(vertex_obj)
        for partition in self._data_writer.iterate_partitions():
            for edge_obj in partition.edges:
                self.__reset_object(edge_obj)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc