• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

WISDEM / WEIS / 10927734534

18 Sep 2024 06:09PM UTC coverage: 79.235% (-0.4%) from 79.668%
10927734534

Pull #315

github

web-flow
Merge 397ba5241 into f779fa594
Pull Request #315: WEIS 1.3.1

21 of 180 new or added lines in 4 files covered. (11.67%)

8 existing lines in 4 files now uncovered.

21647 of 27320 relevant lines covered (79.23%)

0.79 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

52.69
/weis/glue_code/runWEIS.py
1
import numpy as np
1✔
2
import os, sys, time, json
1✔
3
import openmdao.api as om
1✔
4
from weis.glue_code.gc_LoadInputs     import WindTurbineOntologyPythonWEIS
1✔
5
from wisdem.glue_code.gc_WT_InitModel import yaml2openmdao
1✔
6
from weis.glue_code.gc_PoseOptimization  import PoseOptimizationWEIS
1✔
7
from weis.glue_code.glue_code         import WindPark
1✔
8
from wisdem.commonse.mpi_tools        import MPI
1✔
9
from wisdem.commonse                  import fileIO
1✔
10
from weis.glue_code.gc_ROSCOInputs    import assign_ROSCO_values
1✔
11
from weis.control.tmd                 import assign_TMD_values
1✔
12
from weis.aeroelasticse.FileTools     import save_yaml
1✔
13
from wisdem.inputs.validation         import simple_types
1✔
14

15
# Optimization drivers that rely on finite-difference gradients (parallel FD
# coloring applies when one of these is selected).
fd_methods = ['SLSQP','SNOPT', 'LD_MMA']
# Gradient-free evolutionary drivers; these use the "FD" cores as population
# members instead of finite-difference evaluations (see run_weis).
evolutionary_methods = ['DE', 'NSGA2']

# The subprocessor helpers are only importable/meaningful under MPI.
# NOTE: "heirarchical" is the (misspelled) upstream wisdem API name.
if MPI:
    from wisdem.commonse.mpi_tools import map_comm_heirarchical, subprocessor_loop, subprocessor_stop
20

21
def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry_override=None, modeling_override=None, analysis_override=None):
    """Top-level WEIS entry point: load/validate the yaml inputs, build the
    OpenMDAO problem (with MPI color maps for parallel finite differencing and
    parallel OpenFAST runs when MPI is active), run the model or optimization,
    and write outputs on the root rank.

    Parameters
    ----------
    fname_wt_input : str
        Path to the wind turbine geometry ontology yaml.
    fname_modeling_options : str
        Path to the modeling options yaml.
    fname_opt_options : str
        Path to the analysis/optimization options yaml.
    geometry_override : dict, optional
        Values that overwrite problem variables after setup; short arrays are
        placed at the END of the target variable (useful e.g. for twist, where
        leading circular-section indices are not optimized).
    modeling_override, analysis_override : dict, optional
        Overrides merged into the modeling / analysis yaml inputs.

    Returns
    -------
    (wt_opt, modeling_options, opt_options) on rank 0; ([], [], []) on all
    other MPI ranks.
    """
    # Load all yaml inputs and validate (also fills in defaults)
    wt_initial = WindTurbineOntologyPythonWEIS(
        fname_wt_input,
        fname_modeling_options,
        fname_opt_options,
        modeling_override=modeling_override,
        analysis_override=analysis_override
        )
    wt_init, modeling_options, opt_options = wt_initial.get_input_data()

    # Initialize openmdao problem. If running with multiple processors in MPI, use parallel finite differencing equal to the number of cores used.
    # Otherwise, initialize the WindPark system normally. Get the rank number for parallelization. We only print output files using the root processor.
    myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options)

    if MPI:
        n_DV = myopt.get_number_design_variables()
        # Extract the number of cores available
        max_cores = MPI.COMM_WORLD.Get_size()

        # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD)
        # evaluations based on the number of design variables (DV). OpenFAST on/off changes things.
        if modeling_options['Level3']['flag']:

            # If we are running an optimization method that doesn't use finite differencing, set the number of DVs to 1
            if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods):
                n_DV *= 5  # targeting 10*n_DV population size... this is what the equivalent FD coloring would take
            elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
                n_DV = 1


            # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs,
            # otherwise it is half of the number of DV (rounded to the lower integer).
            # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs.
            if max_cores > 2. * n_DV:
                n_FD = n_DV
            else:
                n_FD = int(np.floor(max_cores / 2))
            # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources
            # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and
            # the number of cores available (minus the number of DV, which sit and wait for OF to complete)
            n_OF_runs = modeling_options['DLC_driver']['n_cases']
            n_DV = max([n_DV, 1])
            max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1])
            n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs])
        elif modeling_options['Level2']['flag']:

            # Gradient-free / DOE drivers need no FD coloring: collapse to one DV slot.
            if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
                n_DV = 1


            # Same FD-core split as the Level3 branch above.
            if max_cores > 2. * n_DV:
                n_FD = n_DV
            else:
                n_FD = int(np.floor(max_cores / 2))
            # For Level2 the "OpenFAST runs" are the linearization times.
            n_OF_runs = modeling_options['Level2']['linearization']['NLinTimes']
            n_DV = max([n_DV, 1])
            max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1])
            n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs])
        else:
            # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV
            n_FD = min([max_cores, n_DV])
            n_OF_runs_parallel = 1
            # if we're doing a GA or such, "FD" means "entities in epoch"
            if opt_options['driver']['optimization']['solver'] in evolutionary_methods:
                n_FD = max_cores

        # Define the color map for the cores (how these are distributed between finite differencing and openfast runs)
        if opt_options['driver']['design_of_experiments']['flag']:
            # DOE: one model instance per rank, no subprocessor layer.
            n_FD = MPI.COMM_WORLD.Get_size()
            n_OF_runs_parallel = 1
            rank    = MPI.COMM_WORLD.Get_rank()
            # NOTE(review): this binds BOTH names to the SAME dict, so
            # comm_map_down aliases comm_map_up and receives the {r: [r]}
            # entries below. Harmless today only because the subprocessor
            # loop/stop block at the bottom is skipped for DOE — confirm
            # before reusing these maps elsewhere.
            comm_map_up = comm_map_down = {}
            for r in range(MPI.COMM_WORLD.Get_size()):
                comm_map_up[r] = [r]
            color_i = 0
        else:
            n_FD = max([n_FD, 1])
            # OLAF (free-vortex wake, WakeMod==3) uses OpenMP, which changes
            # how cores are mapped.
            if modeling_options['Level3']['flag'] == True and modeling_options['Level3']['AeroDyn']['WakeMod'] == 3:
                olaf = True
            else:
                olaf = False
            comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_runs_parallel, openmp=olaf)
            rank    = MPI.COMM_WORLD.Get_rank()
            if rank < len(color_map):
                # NOTE(review): the guard above makes this IndexError handler
                # unreachable for color_map indexing; kept as defensive code.
                try:
                    color_i = color_map[rank]
                except IndexError:
                    raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(n_FD))
            else:
                # Ranks beyond the color map get a spare color (sit out).
                color_i = max(color_map) + 1
            # Split COMM_WORLD so each color gets its own communicator.
            comm_i  = MPI.COMM_WORLD.Split(color_i, 1)

    else:
        # Serial run: single "rank", single color.
        color_i = 0
        rank = 0

    # make the folder_output relative to the input, if it's a relative path
    analysis_input_dir = os.path.dirname(opt_options['fname_input_analysis'])
    opt_options['general']['folder_output'] = os.path.join(analysis_input_dir,opt_options['general']['folder_output'])

    folder_output = opt_options['general']['folder_output']
    if rank == 0 and not os.path.isdir(folder_output):
        os.makedirs(folder_output,exist_ok=True)

    if color_i == 0: # the top layer of cores enters, the others sit and wait to run openfast simulations
        # if MPI and opt_options['driver']['optimization']['flag']:
        if MPI:
            if modeling_options['Level3']['flag'] or modeling_options['Level2']['flag']:
                # Parallel settings for OpenFAST
                modeling_options['General']['openfast_configuration']['mpi_run'] = True
                modeling_options['General']['openfast_configuration']['mpi_comm_map_down'] = comm_map_down
                if opt_options['driver']['design_of_experiments']['flag']:
                    modeling_options['General']['openfast_configuration']['cores'] = 1
                else:
                    modeling_options['General']['openfast_configuration']['cores'] = n_OF_runs_parallel

            # Parallel settings for OpenMDAO
            if opt_options['driver']['design_of_experiments']['flag']:
                wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False)
            else:
                # Wrap WindPark in a group with num_par_fd so OpenMDAO
                # distributes FD evaluations across the comm_i communicator.
                wt_opt = om.Problem(model=om.Group(num_par_fd=n_FD), comm=comm_i, reports=False)
                wt_opt.model.add_subsystem('comp', WindPark(modeling_options = modeling_options, opt_options = opt_options), promotes=['*'])
        else:
            # Sequential finite differencing and openfast simulations
            modeling_options['General']['openfast_configuration']['mpi_run'] = False
            modeling_options['General']['openfast_configuration']['cores']   = 1
            wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False)

        # If at least one of the design variables is active, setup an optimization
        if opt_options['opt_flag']:
            wt_opt = myopt.set_driver(wt_opt)
            wt_opt = myopt.set_objective(wt_opt)
            wt_opt = myopt.set_design_variables(wt_opt, wt_init)
            wt_opt = myopt.set_constraints(wt_opt)

            if opt_options['driver']['design_of_experiments']['flag']:
                wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs']
                wt_opt.driver.options['procs_per_model'] = 1 # n_OF_runs_parallel # int(max_cores / np.floor(max_cores/n_OF_runs))

        wt_opt = myopt.set_recorders(wt_opt)
        # NOTE(review): this unconditionally overwrites the DOE debug_print
        # set a few lines above — presumably intentional (adds 'totals'), but
        # worth confirming.
        wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs','totals']

        # Setup openmdao problem
        if opt_options['opt_flag']:
            wt_opt.setup()
        else:
            # If we're not performing optimization, we don't need to allocate
            # memory for the derivative arrays.
            wt_opt.setup(derivatives=False)

        # Load initial wind turbine data from wt_initial to the openmdao problem
        wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options)
        wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options)
        if modeling_options['flags']['TMDs']:
            wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options)

        wt_opt = myopt.set_initial(wt_opt, wt_init)
        if modeling_options['Level3']['flag']:
            wt_opt = myopt.set_initial_weis(wt_opt)

        # If the user provides values in geometry_override, they overwrite
        # whatever values have been set by the yaml files.
        # This is useful for performing black-box wrapped optimization without
        # needing to modify the yaml files.
        # Some logic is used here if the user gives a smalller size for the
        # design variable than expected to input the values into the end
        # of the array.
        # This is useful when optimizing twist, where the first few indices
        # do not need to be optimized as they correspond to a circular cross-section.
        if geometry_override is not None:
            for key in geometry_override:
                num_values = np.array(geometry_override[key]).size
                key_size = wt_opt[key].size
                idx_start = key_size - num_values
                wt_opt[key][idx_start:] = geometry_override[key]

        # Place the last design variables from a previous run into the problem.
        # This needs to occur after the above setup() and yaml2openmdao() calls
        # so these values are correctly placed in the problem.
        wt_opt = myopt.set_restart(wt_opt)

        if 'check_totals' in opt_options['driver']['optimization']:
            if opt_options['driver']['optimization']['check_totals']:
                wt_opt.run_model()
                totals = wt_opt.compute_totals()

        if 'check_partials' in opt_options['driver']['optimization']:
            if opt_options['driver']['optimization']['check_partials']:
                wt_opt.run_model()
                checks = wt_opt.check_partials(compact_print=True)

        sys.stdout.flush()
        # Run openmdao problem
        if opt_options['opt_flag']:
            wt_opt.run_driver()
        else:
            wt_opt.run_model()

        if (not MPI) or (MPI and rank == 0):
            # Save data coming from openmdao to an output yaml file
            froot_out = os.path.join(folder_output, opt_options['general']['fname_output'])
            # Remove the fst_vt key from the dictionary and write out the modeling options
            modeling_options['General']['openfast_configuration']['fst_vt'] = {}
            if not modeling_options['Level3']['from_openfast']:
                wt_initial.write_ontology(wt_opt, froot_out)
            wt_initial.write_options(froot_out)

            # openMDAO doesn't save constraint values, so we get them from this construction
            problem_var_dict = wt_opt.list_driver_vars(
                desvar_opts=["lower", "upper",],
                cons_opts=["lower", "upper", "equals",],
            )
            def simple_types_temp(indict : dict) -> dict:  # DEBUG!!!!!
                """
                Recursively convert *indict* into plain yaml-serializable
                Python types (temporary stand-in for wisdem's simple_types,
                until the changes to WISDEM go through...).
                """
                def convert(value):
                    # ndarray -> nested lists, then recurse on those.
                    if isinstance(value, np.ndarray):
                        return convert(value.tolist())
                    elif isinstance(value, dict):
                        return {key: convert(value) for key, value in value.items()}
                    elif isinstance(value, (list, tuple, set)):
                        return [convert(item) for item in value]  # treat all as list
                    elif isinstance(value, (np.generic)):
                        return value.item()  # convert numpy primitives to the underlying python primitive
                    elif isinstance(value, (float, int, bool, str)):
                        return value  # this should be the end case
                    else:
                        # Anything unserializable is dropped to an empty string.
                        return ""
                return convert(indict)
            save_yaml(folder_output, "problem_vars.yaml", simple_types_temp(problem_var_dict))
            # save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict))

            # Save data to numpy and matlab arrays
            fileIO.save_data(froot_out, wt_opt)

    if MPI and \
            (modeling_options['Level3']['flag'] or modeling_options['Level2']['flag']) and \
            (not opt_options['driver']['design_of_experiments']['flag']) and \
            color_i < 1000000:
        # subprocessor ranks spin, waiting for FAST simulations to run.
        # Only true for cores actually in use, not the ones supporting openfast openmp (marked as color_i = 1000000)
        sys.stdout.flush()
        if rank in comm_map_up.keys():
            subprocessor_loop(comm_map_up)
        sys.stdout.flush()

        # close signal to subprocessors
        subprocessor_stop(comm_map_down)
        sys.stdout.flush()

    # Send each core in use to a barrier synchronization
    if MPI and color_i < 1000000:
        MPI.COMM_WORLD.Barrier()

    # Only the root rank carries real results; other ranks return empties.
    if rank == 0:
        return wt_opt, modeling_options, opt_options
    else:
        return [], [], []
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc