• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pyiron / executorlib / 12308493217

13 Dec 2024 02:32AM UTC coverage: 95.286% (-0.4%) from 95.639%
12308493217

Pull #519

github

web-flow
Merge d14ebbd94 into f1c4ffa9c
Pull Request #519: Add option to write flux log files

7 of 11 new or added lines in 4 files covered. (63.64%)

6 existing lines in 3 files now uncovered.

950 of 997 relevant lines covered (95.29%)

0.95 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.14
/executorlib/standalone/inputcheck.py
1
import inspect
1✔
2
import multiprocessing
1✔
3
import os.path
1✔
4
from concurrent.futures import Executor
1✔
5
from typing import Callable, List, Optional
1✔
6

7

8
def check_oversubscribe(oversubscribe: bool) -> None:
    """
    Reject a truthy oversubscribe flag.

    Raises:
        ValueError: if oversubscribe is True, since the flux backend does
            not support oversubscription.
    """
    if not oversubscribe:
        return
    raise ValueError(
        "Oversubscribing is not supported for the executorlib.flux.PyFLuxExecutor backend."
        "Please use oversubscribe=False instead of oversubscribe=True."
    )
17

18

19
def check_command_line_argument_lst(command_line_argument_lst: List[str]) -> None:
    """
    Reject a non-empty list of command line arguments.

    Raises:
        ValueError: if any command line arguments are given, since the
            SLURM backend does not accept them.
    """
    if command_line_argument_lst:
        raise ValueError(
            "The command_line_argument_lst parameter is not supported for the SLURM backend."
        )
27

28

29
def check_gpus_per_worker(gpus_per_worker: int) -> None:
    """
    Reject any non-zero GPU request.

    Raises:
        TypeError: if gpus_per_worker is not 0, since the MPI backend has
            no GPU assignment support.
    """
    if gpus_per_worker == 0:
        return
    message = (
        "GPU assignment is not supported for the executorlib.mpi.PyMPIExecutor backend."
        "Please use gpus_per_worker=0 instead of gpus_per_worker="
        + str(gpus_per_worker)
        + "."
    )
    raise TypeError(message)
40

41

42
def check_executor(executor: Executor) -> None:
    """
    Reject any externally supplied executor.

    Raises:
        ValueError: if executor is not None; only the flux framework
            backend accepts one.
    """
    if executor is None:
        return
    raise ValueError(
        "The executor parameter is only supported for the flux framework backend."
    )
50

51

52
def check_nested_flux_executor(nested_flux_executor: bool) -> None:
    """
    Reject a truthy nested_flux_executor flag.

    Raises:
        ValueError: if nested_flux_executor is True; only the flux
            framework backend supports nesting.
    """
    if not nested_flux_executor:
        return
    raise ValueError(
        "The nested_flux_executor parameter is only supported for the flux framework backend."
    )
60

61

62
def check_resource_dict(function: Callable) -> None:
    """
    Reject functions that declare a 'resource_dict' parameter.

    Raises:
        ValueError: if the function signature contains a parameter named
            'resource_dict', which executorlib reserves for internal use.
    """
    signature = inspect.signature(function)
    if "resource_dict" in signature.parameters:
        raise ValueError(
            "The parameter resource_dict is used internally in executorlib, "
            "so it cannot be used as a parameter in the submitted functions."
        )
71

72

73
def check_resource_dict_is_empty(resource_dict: dict) -> None:
    """
    Reject a non-empty resource dictionary.

    Raises:
        ValueError: if resource_dict holds any entries; with block
            allocation, resources must be set on the executor itself.
    """
    if resource_dict:
        raise ValueError(
            "When block_allocation is enabled, the resource requirements have to be defined on the executor level."
        )
81

82

83
def check_refresh_rate(refresh_rate: float) -> None:
    """
    Reject any refresh rate other than the default of 0.01 seconds.

    Raises:
        ValueError: if refresh_rate differs from 0.01; the interval only
            applies when dependency handling is enabled.
    """
    if refresh_rate == 0.01:
        return
    raise ValueError(
        "The sleep_interval parameter is only used when disable_dependencies=False."
    )
91

92

93
def check_plot_dependency_graph(plot_dependency_graph: bool) -> None:
    """
    Reject a truthy plot_dependency_graph flag.

    Raises:
        ValueError: if plot_dependency_graph is True; plotting only works
            when dependency handling is enabled.
    """
    if not plot_dependency_graph:
        return
    raise ValueError(
        "The plot_dependency_graph parameter is only used when disable_dependencies=False."
    )
101

102

103
def check_pmi(backend: str, pmi: Optional[str]) -> None:
    """
    Validate the pmi choice against the selected backend.

    Raises:
        ValueError: if pmi is set for a non-flux backend, or if a flux
            backend receives a pmi value other than pmix/pmi1/pmi2/None.
    """
    if backend != "flux_allocation":
        if pmi is not None:
            raise ValueError("The pmi parameter is currently only implemented for flux.")
    elif pmi not in ["pmix", "pmi1", "pmi2", None]:
        raise ValueError(
            "The pmi parameter supports [pmix, pmi1, pmi2], but not: " + pmi
        )
113

114

115
def check_init_function(block_allocation: bool, init_function: Callable) -> None:
    """
    Check that an init_function is only used together with block allocation.

    Args:
        block_allocation: whether the executor reuses a fixed set of workers.
        init_function: optional callable executed once per worker at startup.

    Raises:
        ValueError: if init_function is set while block_allocation is False.
    """
    if not block_allocation and init_function is not None:
        # The original raised ValueError("") with no message, leaving the
        # user without any hint about what went wrong.
        raise ValueError(
            "The init_function parameter is only supported with block_allocation=True, "
            "as the init_function is only called once per worker."
        )
1✔
121

122

123
def check_max_workers_and_cores(
    max_workers: Optional[int], max_cores: Optional[int]
) -> None:
    """
    Reject worker or core limits for the pysqa based backend.

    Raises:
        ValueError: if max_workers or max_cores is not None, since the
            pysqa based backend does not allow controlling either.
    """
    checks = (
        (
            max_workers,
            "The number of workers cannot be controlled with the pysqa based backend.",
        ),
        (
            max_cores,
            "The number of cores cannot be controlled with the pysqa based backend.",
        ),
    )
    for value, message in checks:
        if value is not None:
            raise ValueError(message)
134

135

136
def check_hostname_localhost(hostname_localhost: Optional[bool]) -> None:
    """
    Reject the hostname_localhost option for the pysqa based backend.

    Raises:
        ValueError: if hostname_localhost is set to anything but None.
    """
    if hostname_localhost is None:
        return
    raise ValueError(
        "The option to connect to hosts based on their hostname is not available with the pysqa based backend."
    )
141

142

143
def check_flux_executor_pmi_mode(flux_executor_pmi_mode: Optional[str]) -> None:
    """
    Reject the flux pmi mode option for the pysqa based backend.

    Raises:
        ValueError: if flux_executor_pmi_mode is set to anything but None.
    """
    if flux_executor_pmi_mode is None:
        return
    raise ValueError(
        "The option to specify the flux pmi mode is not available with the pysqa based backend."
    )
148

149

150
def check_flux_log_files(flux_log_files: Optional[bool]) -> None:
    """
    Reject a truthy flux_log_files flag.

    Raises:
        ValueError: if flux_log_files is True; writing flux log files is
            only supported for the flux framework backend.
    """
    if not flux_log_files:
        return
    raise ValueError(
        "The flux_log_files parameter is only supported for the flux framework backend."
    )
158

159

160
def check_pysqa_config_directory(pysqa_config_directory: Optional[str]) -> None:
    """
    Reject a pysqa config directory for non-pysqa backends.

    Raises:
        ValueError: if pysqa_config_directory is set to anything but None.
    """
    if pysqa_config_directory is None:
        return
    raise ValueError(
        "pysqa_config_directory parameter is only supported for pysqa backend."
    )
168

169

170
def validate_number_of_cores(
    max_cores: Optional[int] = None,
    max_workers: Optional[int] = None,
    cores_per_worker: Optional[int] = None,
    set_local_cores: bool = False,
) -> int:
    """
    Validate the resource limits and return the number of workers to use.

    Args:
        max_cores: total number of cores; used to derive the worker count
            when max_workers is not given.
        max_workers: explicit number of workers; returned as-is when set.
        cores_per_worker: cores assigned to each worker; required when the
            worker count is derived from max_cores.
        set_local_cores: when neither max_cores nor max_workers is given,
            fall back to the local CPU count instead of raising.

    Returns:
        int: the validated number of workers.

    Raises:
        ValueError: if neither max_cores nor max_workers is defined and
            set_local_cores is False.
    """
    if max_cores is None and max_workers is None:
        if not set_local_cores:
            raise ValueError(
                "Block allocation requires a fixed set of computational resources. Neither max_cores nor max_workers are defined."
            )
        max_workers = multiprocessing.cpu_count()
    elif max_cores is not None and max_workers is None:
        # Floor division stays exact for arbitrarily large integers, unlike
        # the previous int(max_cores / cores_per_worker), which rounded
        # through a float.
        max_workers = max_cores // cores_per_worker
    return max_workers
1✔
189

190

191
def check_file_exists(file_name: str):
    """
    Validate that a file name is set and refers to an existing path.

    Raises:
        ValueError: if file_name is None, or if no file with that name
            exists on the file system.
    """
    if file_name is None:
        raise ValueError("file_name is not set.")
    if os.path.exists(file_name):
        return
    raise ValueError("file_name is not written to the file system.")
1✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc