pyiron / pympipool, build 9660887308 (push build, reported via GitHub, committed by web-flow)
25 Jun 2024 10:34AM UTC, coverage: 93.77% (+0.2%) from 93.597%
Merge pull request #364 from pyiron/conda_environment: "Add support for conda environments again"

20 of 20 new or added lines in 5 files covered (100.0%).
1 existing line in 1 file is now uncovered.
888 of 947 relevant lines covered (93.77%), 0.94 hits per line.
Source file: /pympipool/shared/interface.py (94.12% of relevant lines covered)

Uncovered in this file: the three raise NotImplementedError bodies in BaseInterface and the --cpus-per-task branch of generate_slurm_command; the closing line of the BaseInterface.bootup signature is the existing line newly uncovered by this build. Short usage sketches follow the listing.
from abc import ABC
import subprocess
from typing import Optional


MPI_COMMAND = "mpiexec"
SLURM_COMMAND = "srun"


class BaseInterface(ABC):
    def __init__(self, cwd: str, cores: int = 1, oversubscribe: bool = False):
        self._cwd = cwd
        self._cores = cores
        self._oversubscribe = oversubscribe

    def bootup(
        self,
        command_lst: list[str],
        prefix_name: Optional[str] = None,
        prefix_path: Optional[str] = None,
    ):
        raise NotImplementedError

    def shutdown(self, wait: bool = True):
        raise NotImplementedError

    def poll(self):
        raise NotImplementedError


class SubprocessInterface(BaseInterface):
    def __init__(
        self,
        cwd: Optional[str] = None,
        cores: int = 1,
        oversubscribe: bool = False,
    ):
        super().__init__(
            cwd=cwd,
            cores=cores,
            oversubscribe=oversubscribe,
        )
        self._process = None

    def bootup(
        self,
        command_lst: list[str],
        prefix_name: Optional[str] = None,
        prefix_path: Optional[str] = None,
    ):
        if prefix_name is None and prefix_path is None:
            self._process = subprocess.Popen(
                args=self.generate_command(command_lst=command_lst),
                cwd=self._cwd,
                stdin=subprocess.DEVNULL,
            )
        else:
            # conda_subprocess is imported lazily so it is only required when a
            # conda environment (prefix_name or prefix_path) is requested.
            import conda_subprocess

            self._process = conda_subprocess.Popen(
                args=self.generate_command(command_lst=command_lst),
                cwd=self._cwd,
                stdin=subprocess.DEVNULL,
                prefix_path=prefix_path,
                prefix_name=prefix_name,
            )

    def generate_command(self, command_lst: list[str]) -> list[str]:
        return command_lst

    def shutdown(self, wait: bool = True):
        self._process.communicate()
        self._process.terminate()
        if wait:
            self._process.wait()
        self._process = None

    def poll(self):
        return self._process is not None and self._process.poll() is None


class MpiExecInterface(SubprocessInterface):
    def generate_command(self, command_lst: list[str]) -> list[str]:
        command_prepend_lst = generate_mpiexec_command(
            cores=self._cores,
            oversubscribe=self._oversubscribe,
        )
        return super().generate_command(
            command_lst=command_prepend_lst + command_lst,
        )


class SrunInterface(SubprocessInterface):
    def __init__(
        self,
        cwd: Optional[str] = None,
        cores: int = 1,
        threads_per_core: int = 1,
        gpus_per_core: int = 0,
        oversubscribe: bool = False,
        command_line_argument_lst: list[str] = [],
    ):
        super().__init__(
            cwd=cwd,
            cores=cores,
            oversubscribe=oversubscribe,
        )
        self._threads_per_core = threads_per_core
        self._gpus_per_core = gpus_per_core
        self._command_line_argument_lst = command_line_argument_lst

    def generate_command(self, command_lst: list[str]) -> list[str]:
        command_prepend_lst = generate_slurm_command(
            cores=self._cores,
            cwd=self._cwd,
            threads_per_core=self._threads_per_core,
            gpus_per_core=self._gpus_per_core,
            oversubscribe=self._oversubscribe,
            command_line_argument_lst=self._command_line_argument_lst,
        )
        return super().generate_command(
            command_lst=command_prepend_lst + command_lst,
        )


def generate_mpiexec_command(cores: int, oversubscribe: bool = False) -> list[str]:
    # A single core needs no mpiexec wrapper at all.
    if cores == 1:
        return []
    else:
        command_prepend_lst = [MPI_COMMAND, "-n", str(cores)]
        if oversubscribe:
            command_prepend_lst += ["--oversubscribe"]
        return command_prepend_lst


def generate_slurm_command(
    cores: int,
    cwd: str,
    threads_per_core: int = 1,
    gpus_per_core: int = 0,
    oversubscribe: bool = False,
    command_line_argument_lst: list[str] = [],
) -> list[str]:
    command_prepend_lst = [SLURM_COMMAND, "-n", str(cores)]
    if cwd is not None:
        command_prepend_lst += ["-D", cwd]
    if threads_per_core > 1:
        command_prepend_lst += ["--cpus-per-task=" + str(threads_per_core)]
    if gpus_per_core > 0:
        command_prepend_lst += ["--gpus-per-task=" + str(gpus_per_core)]
    if oversubscribe:
        command_prepend_lst += ["--oversubscribe"]
    if len(command_line_argument_lst) > 0:
        command_prepend_lst += command_line_argument_lst
    return command_prepend_lst
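As a quick orientation to the API above, here is a minimal usage sketch of the plain subprocess.Popen path through SubprocessInterface.bootup(). The command list, working directory, and polling loop are illustrative choices, not anything prescribed by the file or this report.

import time

from pympipool.shared.interface import SubprocessInterface

# Minimal sketch: run a local command through the plain subprocess.Popen branch
# of bootup(); the command and cwd are illustrative values.
interface = SubprocessInterface(cwd=".", cores=1)
interface.bootup(command_lst=["python", "-c", "print('hello from the child process')"])

while interface.poll():  # True as long as the child process is still running
    time.sleep(0.1)

interface.shutdown(wait=True)  # reaps the process and resets the handle to None

Because neither prefix_name nor prefix_path is given, conda_subprocess is never imported on this path.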
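The pull request this build covers ("Add support for conda environments again") is exercised through the second branch of bootup(): passing prefix_name or prefix_path switches the launcher from subprocess.Popen to conda_subprocess.Popen. A sketch under the assumption that a conda environment named py312 already exists on the machine; the environment name is hypothetical.

from pympipool.shared.interface import SubprocessInterface

# Assumes a pre-existing conda environment called "py312"; the name is a
# placeholder, not something defined by pympipool or this report.
interface = SubprocessInterface(cwd=".", cores=1)
interface.bootup(
    command_lst=["python", "--version"],
    prefix_name="py312",  # triggers the lazy "import conda_subprocess" branch
)
interface.shutdown(wait=True)

conda_subprocess is only imported inside that branch, so the dependency stays optional for users who never request a conda environment.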
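MpiExecInterface and SrunInterface only change the prefix that generate_command() prepends; nothing is executed until bootup() is called. The expected lists in the comments below are derived directly from generate_mpiexec_command and generate_slurm_command as listed above, with illustrative constructor arguments.

from pympipool.shared.interface import MpiExecInterface, SrunInterface

mpi = MpiExecInterface(cwd="/tmp", cores=4, oversubscribe=True)
print(mpi.generate_command(command_lst=["python", "run.py"]))
# ['mpiexec', '-n', '4', '--oversubscribe', 'python', 'run.py']

slurm = SrunInterface(cwd="/tmp", cores=2, threads_per_core=2, gpus_per_core=1)
print(slurm.generate_command(command_lst=["python", "run.py"]))
# ['srun', '-n', '2', '-D', '/tmp', '--cpus-per-task=2', '--gpus-per-task=1',
#  'python', 'run.py']

With cores=1 and no oversubscription, generate_mpiexec_command returns an empty list, so MpiExecInterface runs the command directly without an mpiexec prefix.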