
Coveralls coverage report: pyiron / pympipool, build 9929937381
14 Jul 2024 06:36PM UTC. Coverage: 93.776% (remained the same).

Pull Request #374: Rename pympipool to executorlib (merge a12422364 into dc1b4604c, via GitHub web-flow)

52 of 58 new or added lines in 13 files covered (89.66%)
889 of 948 relevant lines covered (93.78%)
0.94 hits per line
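As a quick sanity check (not part of the Coveralls output), the reported percentages follow directly from the line counts above:

new_covered, new_total = 52, 58
covered, relevant = 889, 948
print(round(100 * new_covered / new_total, 2))  # 89.66
print(round(100 * covered / relevant, 2))       # 93.78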

Source File
/executorlib/__init__.py (75.0% of relevant lines covered)
from typing import Optional

from executorlib.interactive import create_executor
from executorlib.interactive.dependencies import ExecutorWithDependencies
from executorlib.shared.inputcheck import (
    check_plot_dependency_graph as _check_plot_dependency_graph,
)
from executorlib.shared.inputcheck import (
    check_refresh_rate as _check_refresh_rate,
)
from executorlib.shell.executor import SubprocessExecutor
from executorlib.shell.interactive import ShellExecutor

from ._version import get_versions

__version__ = get_versions()["version"]
__all__ = [
    SubprocessExecutor,
    ShellExecutor,
]


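# Note (added comment, not in the upstream file): the cached FileExecutor presumably
# pulls in optional dependencies, so it is exported only when the import below succeeds.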
try:
    from executorlib.cache.executor import FileExecutor

    __all__ += [FileExecutor]
except ImportError:  # not covered
    pass  # not covered


class Executor:
    """
    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
    preferably the flux framework for distributing python functions within a given resource allocation. In contrast to
    the mpi4py.futures.MPIPoolExecutor, the executorlib.Executor can be executed in a serial python process and does not
    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
    in an interactive Jupyter notebook.

    Args:
        max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
                           cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
                           recommended, as computers have a limited number of compute cores.
        max_cores (int): defines the number of cores which can be used in parallel
        cores_per_worker (int): number of MPI cores to be used for each function call
        threads_per_core (int): number of OpenMP threads to be used for each function call
        gpus_per_worker (int): number of GPUs per worker - defaults to 0
        oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
        cwd (str/None): current working directory where the parallel python task is executed
        conda_environment_name (str): name of the conda environment to initialize
        conda_environment_path (str): path of the conda environment to initialize
        executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
        hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                      context of an HPC cluster this is essential to be able to communicate to an
                                      Executor running on a different compute node within the same allocation. In
                                      principle any computer should be able to resolve that its own hostname points
                                      to the same address as localhost, but macOS >= 12 seems to disable this lookup
                                      for security reasons, so on macOS this option has to be set to true.
        backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
                       is selected (the default) the available backend is determined automatically.
        block_allocation (boolean): To accelerate the submission of a series of python functions with the same resource
                                    requirements, executorlib supports block allocation. In this case all resources have
                                    to be defined on the executor, rather than during the submission of the individual
                                    function.
        init_function (callable/None): optional function to preset arguments for functions which are submitted later
        command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)
        pmi (str): PMI interface to use (OpenMPI v5 requires pmix) - default is None (Flux only)
        disable_dependencies (boolean): Disable resolving future objects during the submission.
        refresh_rate (float): Set the refresh rate in seconds, i.e. how frequently the input queue is checked.
        plot_dependency_graph (bool): Plot the dependencies of multiple future objects without executing them. For
                                      debugging purposes and to get an overview of the specified dependencies.

    Examples:
        ```
        >>> import numpy as np
        >>> from executorlib import Executor
        >>>
        >>> def calc(i, j, k):
        >>>     from mpi4py import MPI
        >>>     size = MPI.COMM_WORLD.Get_size()
        >>>     rank = MPI.COMM_WORLD.Get_rank()
        >>>     return np.array([i, j, k]), size, rank
        >>>
        >>> def init_k():
        >>>     return {"k": 3}
        >>>
        >>> with Executor(max_cores=2, cores_per_worker=2, init_function=init_k) as p:
        >>>     fs = p.submit(calc, 2, j=4)
        >>>     print(fs.result())
        [(array([2, 4, 3]), 2, 0), (array([2, 4, 3]), 2, 1)]
        ```
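        When dependencies are enabled (the default, disable_dependencies=False), a Future returned by one
        submission can be passed as an argument to a later submission. The sketch below is illustrative only;
        it assumes the "local" backend with the default single core per worker, and add() is not part of
        executorlib:
        ```
        >>> from executorlib import Executor
        >>>
        >>> def add(a, b):
        >>>     return a + b
        >>>
        >>> with Executor(backend="local") as exe:
        >>>     future_sum = exe.submit(add, 1, 2)
        >>>     fs = exe.submit(add, future_sum, 3)
        >>>     print(fs.result())
        6
        ```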
    """

    def __init__(
        self,
        max_workers: int = 1,
        max_cores: int = 1,
        cores_per_worker: int = 1,
        threads_per_core: int = 1,
        gpus_per_worker: int = 0,
        oversubscribe: bool = False,
        cwd: Optional[str] = None,
        conda_environment_name: Optional[str] = None,
        conda_environment_path: Optional[str] = None,
        executor=None,
        hostname_localhost: bool = False,
        backend: str = "auto",
        block_allocation: bool = True,
        init_function: Optional[callable] = None,
        command_line_argument_lst: list[str] = [],
        pmi: Optional[str] = None,
        disable_dependencies: bool = False,
        refresh_rate: float = 0.01,
        plot_dependency_graph: bool = False,
    ):
        # Use __new__() instead of __init__(). This function is only implemented to enable auto-completion.
        pass  # not covered

    def __new__(
        cls,
        max_workers: int = 1,
        max_cores: int = 1,
        cores_per_worker: int = 1,
        threads_per_core: int = 1,
        gpus_per_worker: int = 0,
        oversubscribe: bool = False,
        cwd: Optional[str] = None,
        conda_environment_name: Optional[str] = None,
        conda_environment_path: Optional[str] = None,
        executor=None,
        hostname_localhost: bool = False,
        backend: str = "auto",
        block_allocation: bool = False,
        init_function: Optional[callable] = None,
        command_line_argument_lst: list[str] = [],
        pmi: Optional[str] = None,
        disable_dependencies: bool = False,
        refresh_rate: float = 0.01,
        plot_dependency_graph: bool = False,
    ):
        """
        Instead of returning an executorlib.Executor object this function returns either an executorlib.mpi.PyMPIExecutor,
        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
        executorlib.flux.PyFluxExecutor is the preferred choice, while the executorlib.mpi.PyMPIExecutor is primarily used
        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
        requires the SLURM workload manager to be installed on the system.

        Args:
            max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                               number of cores which can be used in parallel - just like the max_cores parameter. Using
                               max_cores is recommended, as computers have a limited number of compute cores.
            max_cores (int): defines the number of cores which can be used in parallel
            cores_per_worker (int): number of MPI cores to be used for each function call
            threads_per_core (int): number of OpenMP threads to be used for each function call
            gpus_per_worker (int): number of GPUs per worker - defaults to 0
            oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
            cwd (str/None): current working directory where the parallel python task is executed
            conda_environment_name (str): name of the conda environment to initialize
            conda_environment_path (str): path of the conda environment to initialize
            executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
            hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                          context of an HPC cluster this is essential to be able to communicate to an
                                          Executor running on a different compute node within the same allocation. In
                                          principle any computer should be able to resolve that its own hostname points
                                          to the same address as localhost, but macOS >= 12 seems to disable this lookup
                                          for security reasons, so on macOS this option has to be set to true.
            backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
                           is selected (the default) the available backend is determined automatically.
            block_allocation (boolean): To accelerate the submission of a series of python functions with the same
                                        resource requirements, executorlib supports block allocation. In this case all
                                        resources have to be defined on the executor, rather than during the submission
                                        of the individual function.
            init_function (callable/None): optional function to preset arguments for functions which are submitted later
            command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)
            pmi (str): PMI interface to use (OpenMPI v5 requires pmix) - default is None (Flux only)
            disable_dependencies (boolean): Disable resolving future objects during the submission.
            refresh_rate (float): Set the refresh rate in seconds, i.e. how frequently the input queue is checked.
            plot_dependency_graph (bool): Plot the dependencies of multiple future objects without executing them. For
                                          debugging purposes and to get an overview of the specified dependencies.

        """
        if not disable_dependencies:
            return ExecutorWithDependencies(
                max_workers=max_workers,
                max_cores=max_cores,
                cores_per_worker=cores_per_worker,
                threads_per_core=threads_per_core,
                gpus_per_worker=gpus_per_worker,
                oversubscribe=oversubscribe,
                cwd=cwd,
                conda_environment_name=conda_environment_name,
                conda_environment_path=conda_environment_path,
                executor=executor,
                hostname_localhost=hostname_localhost,
                backend=backend,
                block_allocation=block_allocation,
                init_function=init_function,
                command_line_argument_lst=command_line_argument_lst,
                pmi=pmi,
                refresh_rate=refresh_rate,
                plot_dependency_graph=plot_dependency_graph,
            )
        else:
            _check_plot_dependency_graph(plot_dependency_graph=plot_dependency_graph)  # not covered
            _check_refresh_rate(refresh_rate=refresh_rate)  # not covered
            return create_executor(  # not covered
                max_workers=max_workers,
                max_cores=max_cores,
                cores_per_worker=cores_per_worker,
                threads_per_core=threads_per_core,
                gpus_per_worker=gpus_per_worker,
                oversubscribe=oversubscribe,
                cwd=cwd,
                conda_environment_name=conda_environment_name,
                conda_environment_path=conda_environment_path,
                executor=executor,
                hostname_localhost=hostname_localhost,
                backend=backend,
                block_allocation=block_allocation,
                init_function=init_function,
                command_line_argument_lst=command_line_argument_lst,
                pmi=pmi,
            )
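Because Executor.__new__() returns a backend-specific executor rather than an Executor instance, the class mainly acts as a documented entry point. A minimal usage sketch, assuming a machine where the "local" backend can be selected and that the returned object supports the submit()/result() protocol shown in the docstring examples (double() is illustrative only):

from executorlib import Executor

def double(x):
    return 2 * x

with Executor(backend="local", max_cores=1) as exe:
    # exe is whatever __new__() returned, not an Executor instance
    fs = exe.submit(double, 21)
    print(fs.result())  # expected to print 42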