pyiron / executorlib / 13248108792

10 Feb 2025 06:43PM UTC coverage: 95.592% (-0.1%) from 95.735%

Pull Request #565: [feature] Add option to specify number of nodes
Merge 68f1e4ca4 into f7de7f7ca

8 of 8 new or added lines in 2 files covered. (100.0%)
12 existing lines in 4 files now uncovered.
1106 of 1157 relevant lines covered (95.59%)
0.96 hits per line

Source file: /executorlib/interactive/slurm.py (91.67% covered)
Lines not covered by tests in this file: the "-N", "--cpus-per-task", and "--exact" branches in generate_slurm_command.

import os
from typing import Optional

from executorlib.standalone.interactive.spawner import SubprocessSpawner

SLURM_COMMAND = "srun"


def validate_max_workers(max_workers: int, cores: int, threads_per_core: int):
    """
    Check that the requested number of workers fits into the current SLURM
    allocation, based on the SLURM_NTASKS and SLURM_CPUS_PER_TASK environment
    variables, and raise a ValueError otherwise.
    """
    cores_total = int(os.environ["SLURM_NTASKS"]) * int(
        os.environ["SLURM_CPUS_PER_TASK"]
    )
    cores_requested = max_workers * cores * threads_per_core
    if cores_total < cores_requested:
        raise ValueError(
            "The number of requested cores is larger than the available cores "
            + str(cores_total)
            + " < "
            + str(cores_requested)
        )


class SrunSpawner(SubprocessSpawner):
    def __init__(
        self,
        cwd: Optional[str] = None,
        cores: int = 1,
        threads_per_core: int = 1,
        gpus_per_core: int = 0,
        num_nodes: Optional[int] = None,
        exclusive: bool = False,
        openmpi_oversubscribe: bool = False,
        slurm_cmd_args: Optional[list[str]] = None,
    ):
        """
        Srun interface implementation.

        Args:
            cwd (str, optional): The current working directory. Defaults to None.
            cores (int, optional): The number of cores to use. Defaults to 1.
            threads_per_core (int, optional): The number of threads per core. Defaults to 1.
            gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
            num_nodes (int, optional): The number of compute nodes to request (srun -N). Defaults to None.
            exclusive (bool, optional): Whether to restrict the job step to exactly the requested resources (srun --exact). Defaults to False.
            openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.
            slurm_cmd_args (list[str], optional): Additional command line arguments. Defaults to None.
        """
        super().__init__(
            cwd=cwd,
            cores=cores,
            openmpi_oversubscribe=openmpi_oversubscribe,
            threads_per_core=threads_per_core,
        )
        self._gpus_per_core = gpus_per_core
        self._slurm_cmd_args = slurm_cmd_args
        self._num_nodes = num_nodes
        self._exclusive = exclusive

    def generate_command(self, command_lst: list[str]) -> list[str]:
        """
        Generate the command list for the Srun interface.

        Args:
            command_lst (list[str]): The command list.

        Returns:
            list[str]: The generated command list.
        """
        command_prepend_lst = generate_slurm_command(
            cores=self._cores,
            cwd=self._cwd,
            threads_per_core=self._threads_per_core,
            gpus_per_core=self._gpus_per_core,
            num_nodes=self._num_nodes,
            exclusive=self._exclusive,
            openmpi_oversubscribe=self._openmpi_oversubscribe,
            slurm_cmd_args=self._slurm_cmd_args,
        )
        return super().generate_command(
            command_lst=command_prepend_lst + command_lst,
        )


def generate_slurm_command(
    cores: int,
    cwd: Optional[str],
    threads_per_core: int = 1,
    gpus_per_core: int = 0,
    num_nodes: Optional[int] = None,
    exclusive: bool = False,
    openmpi_oversubscribe: bool = False,
    slurm_cmd_args: Optional[list[str]] = None,
) -> list[str]:
    """
    Generate the command list for the SLURM interface.

    Args:
        cores (int): The number of cores.
        cwd (str): The current working directory.
        threads_per_core (int, optional): The number of threads per core. Defaults to 1.
        gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
        num_nodes (int, optional): The number of compute nodes to request (srun -N). Defaults to None.
        exclusive (bool, optional): Whether to restrict the job step to exactly the requested resources (srun --exact). Defaults to False.
        openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.
        slurm_cmd_args (list[str], optional): Additional command line arguments. Defaults to None.

    Returns:
        list[str]: The generated command list.
    """
    command_prepend_lst = [SLURM_COMMAND, "-n", str(cores)]
    if cwd is not None:
        command_prepend_lst += ["-D", cwd]
    if num_nodes is not None:
        command_prepend_lst += ["-N", str(num_nodes)]
    if threads_per_core > 1:
        command_prepend_lst += ["--cpus-per-task=" + str(threads_per_core)]
    if gpus_per_core > 0:
        command_prepend_lst += ["--gpus-per-task=" + str(gpus_per_core)]
    if exclusive:
        command_prepend_lst += ["--exact"]
    if openmpi_oversubscribe:
        command_prepend_lst += ["--oversubscribe"]
    if slurm_cmd_args is not None and len(slurm_cmd_args) > 0:
        command_prepend_lst += slurm_cmd_args
    return command_prepend_lst
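
As a rough usage sketch, the call below shows the srun command list that generate_slurm_command assembles once the num_nodes and exclusive options added in Pull Request #565 are set. The argument values are illustrative and not taken from the project's test suite.

from executorlib.interactive.slurm import generate_slurm_command

# Illustrative resource request; any realistic values behave the same way.
command = generate_slurm_command(
    cores=8,
    cwd="/scratch/job",
    threads_per_core=2,
    gpus_per_core=1,
    num_nodes=2,       # option added in Pull Request #565
    exclusive=True,    # adds the --exact flag
    openmpi_oversubscribe=False,
    slurm_cmd_args=["--account=my_account"],  # hypothetical extra srun argument
)
print(command)
# ['srun', '-n', '8', '-D', '/scratch/job', '-N', '2', '--cpus-per-task=2',
#  '--gpus-per-task=1', '--exact', '--account=my_account']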
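Similarly, a minimal sketch of validate_max_workers, assuming a hypothetical allocation in which SLURM granted 4 tasks with 2 CPUs each; the environment variables are set manually here only for illustration.

import os
from executorlib.interactive.slurm import validate_max_workers

# Pretend SLURM allocated 4 tasks x 2 CPUs = 8 cores in total.
os.environ["SLURM_NTASKS"] = "4"
os.environ["SLURM_CPUS_PER_TASK"] = "2"

# 4 workers x 2 cores x 1 thread = 8 requested cores: fits, returns silently.
validate_max_workers(max_workers=4, cores=2, threads_per_core=1)

# 4 workers x 2 cores x 2 threads = 16 requested cores: exceeds 8, raises ValueError.
validate_max_workers(max_workers=4, cores=2, threads_per_core=2)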