WISDEM / WEIS, build 11962331085
21 Nov 2024 09:56PM UTC coverage: 78.263% (-0.5%) from 78.802%

Pull Request #308: DLC Generation - Refactor and New Cases (github)
dzalkind: Merge remote-tracking branch 'origin/develop' into DLC_RefactorCaseInputs

445 of 659 new or added lines in 9 files covered (67.53%).
3 existing lines in 3 files now uncovered.
21415 of 27363 relevant lines covered (78.26%).
0.78 hits per line.

Source File: /weis/glue_code/mpi_tools.py (0.0% of lines covered)
import os
import sys

from openmdao.utils.mpi import MPI


def under_mpirun():
    """Return True if we're being executed under mpirun."""
    # This is a bit of a hack, but there appears to be no consistent set of
    # environment variables between MPI implementations.
    for name in os.environ.keys():
        if (
            name == "OMPI_COMM_WORLD_RANK"
            or name == "MPIEXEC_HOSTNAME"
            or name.startswith("MPIR_")
            or name.startswith("MPICH_")
            or name.startswith("INTEL_ONEAPI_MPI_")
            or name.startswith("I_MPI_")
        ):
            return True
    return False


if under_mpirun():

    def debug(*msg):  # pragma: no cover
        newmsg = ["%d: " % MPI.COMM_WORLD.rank] + list(msg)
        for m in newmsg:
            sys.stdout.write("%s " % m)
        sys.stdout.write("\n")
        sys.stdout.flush()

else:
    MPI = None


def map_comm_heirarchical(n_DV, n_OF, openmp=False):
    """
    Hierarchical parallelization communicator mapping.  Assumes a number of top-level
    processes equal to the number of design variables (x2 if central finite differencing
    is used), each with its associated number of OpenFAST simulations.
    When the openmp flag is turned on, the code spreads the OpenFAST simulations across
    nodes to leverage the OpenMP parallelization of OpenFAST.  The cores that will run
    under OpenMP are marked in the color map as 1000000, the ones handling Python and the
    design variables are marked as 0, and finally the master ones for each OpenFAST run
    are marked with a 1.
    """
    if openmp:
        n_procs_per_node = 36  # Number of processors per node
        num_procs = MPI.COMM_WORLD.Get_size()
        n_nodes = num_procs / n_procs_per_node

        comm_map_down = {}
        comm_map_up = {}
        color_map = [1000000] * num_procs

        n_DV_per_node = n_DV / n_nodes

        for nn in range(int(n_nodes)):
            for n_dv in range(int(n_DV_per_node)):
                comm_map_down[nn * n_procs_per_node + n_dv] = [
                    int(n_DV_per_node) + n_dv * n_OF + nn * n_procs_per_node + j for j in range(n_OF)
                ]

                # This core handles Python, so in the color map the entry is 0
                color_map[nn * n_procs_per_node + n_dv] = int(0)
                # These cores handle OpenFAST, so in the color map the entry is 1
                for k in comm_map_down[nn * n_procs_per_node + n_dv]:
                    color_map[k] = int(1)

                for j in comm_map_down[nn * n_procs_per_node + n_dv]:
                    comm_map_up[j] = nn * n_procs_per_node + n_dv
    else:
        # Flat layout: the first n_DV ranks drive the problem and the remaining
        # n_DV * n_OF ranks run the OpenFAST simulations, n_OF per design variable.
        comm_map_down = {}
        comm_map_up = {}
        color_map = [0] * n_DV

        for i in range(n_DV):
            comm_map_down[i] = [n_DV + j + i * n_OF for j in range(n_OF)]
            color_map.extend([i + 1] * n_OF)

            for j in comm_map_down[i]:
                comm_map_up[j] = i

    return comm_map_down, comm_map_up, color_map
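# Illustrative example (editorial addition, not part of the original source file):
# for the flat (openmp=False) branch with n_DV=2 and n_OF=4, matching the
# __main__ call at the bottom of this file, the mapping works out to
#   comm_map_down = {0: [2, 3, 4, 5], 1: [6, 7, 8, 9]}
#   comm_map_up   = {2: 0, 3: 0, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1}
#   color_map     = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
# i.e. ranks 0 and 1 each drive one design variable, and ranks 2-9 run the
# OpenFAST evaluations, four per driver.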


def subprocessor_loop(comm_map_up):
    """
    Subprocessor loop, waiting to receive a function and its arguments to evaluate.
    The output of the function is sent back to the parent rank.  Loops until a stop
    signal is received.

    Input data format:
    data[0] = function to be evaluated
    data[1] = [list of arguments]
    If the function to be evaluated does not fit this format, then a wrapper function
    should be created and passed that handles the setup, argument assignment, etc.
    for the actual function.

    Stop signal:
    data[0] = False
    """
    rank = MPI.COMM_WORLD.Get_rank()
    rank_target = comm_map_up[rank]

    keep_running = True
    while keep_running:
        data = MPI.COMM_WORLD.recv(source=rank_target, tag=0)
        if data[0] is False:
            break
        else:
            func_execution = data[0]
            args = data[1]
            output = func_execution(args)
            MPI.COMM_WORLD.send(output, dest=rank_target, tag=1)


def subprocessor_stop(comm_map_down):
    """
    Send the stop signal to the subprocessors.
    """
    for rank in comm_map_down.keys():
        subranks = comm_map_down[rank]
        for subrank_i in subranks:
            MPI.COMM_WORLD.send([False], dest=subrank_i, tag=0)
    print("All MPI subranks closed.")


if __name__ == "__main__":

    (
        _,
        _,
        _,
    ) = map_comm_heirarchical(2, 4)
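What follows is a minimal usage sketch, an editorial illustration rather than code from the WEIS repository, showing how the three functions above are meant to fit together when a script is launched under mpirun with n_DV + n_DV * n_OF ranks (10 for the values used in __main__ above). The driver ranks send (function, arguments) pairs in the data format documented in subprocessor_loop, the worker ranks sit in subprocessor_loop until subprocessor_stop tells them to exit, and the evaluate function here is a hypothetical stand-in for the OpenFAST wrapper that WEIS would actually pass.

# Hypothetical driver script; run with: mpirun -n 10 python this_script.py
from weis.glue_code.mpi_tools import (
    MPI,
    map_comm_heirarchical,
    subprocessor_loop,
    subprocessor_stop,
)


def evaluate(args):
    # Hypothetical stand-in for an OpenFAST evaluation; defined at module
    # level so it can be pickled by reference on every rank.
    return sum(args)


n_DV, n_OF = 2, 4  # two design variables, four OpenFAST runs each
comm_map_down, comm_map_up, _ = map_comm_heirarchical(n_DV, n_OF)
rank = MPI.COMM_WORLD.Get_rank()

if rank in comm_map_down:
    # Driver rank: hand each of its subprocessors a (function, arguments) pair.
    for sub in comm_map_down[rank]:
        MPI.COMM_WORLD.send([evaluate, [1.0, 2.0, 3.0]], dest=sub, tag=0)
    results = [MPI.COMM_WORLD.recv(source=sub, tag=1) for sub in comm_map_down[rank]]
    print(rank, results)
    # Release only this driver's own subprocessors.
    subprocessor_stop({rank: comm_map_down[rank]})
elif rank in comm_map_up:
    # Worker rank: block here, evaluating whatever arrives, until the stop signal.
    subprocessor_loop(comm_map_up)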