• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

WISDEM / WEIS / 17959868633

23 Sep 2025 09:43PM UTC coverage: 58.153% (-0.5%) from 58.657%
17959868633

push

github

web-flow
Support for WindIO 2.0 and WISDEM 4.0 (#430)

* beginning migration to windiov2

* can now run through at least one example

* more examples running

* fixing more tests and examples

* testing improvements

* more test fixes

* trying with wisdem windio2

* fixing command option

* trying less restrictive model options writing

* making sure to test py313

* some progress, but have to sync on raft changes too

* fix section_offset_y and section_offset_x in visualizer

* gha update

* some more viz app progress

* Joints have names, openfast only

* Fix Ca_coarse definition

* fixed unit tests

* consistency in 15mw files, enabling unit tests

* wisdem integration and examples debugging progress

* testing improvements

* setting truth values

* trap all zeros in mode shapes

* Make merit figure checks lower case

* remove conflicting schema for merit figure

* trying again with kestrel-generated truth archive

* fix rectangular by pulling from windio branch directly

* redo tagging

* typo in url

* Try m2-pkg-config in windows

* changing to strip theory so hams does not choke on the size of the mesh

* Revert intro to RAFT viz test

* Install bs4 in conda

* Sync tower design geometry/modeling with regular IEA-15 inputs

* Sync TMD example with vanilla Volturnus geometry/modeling

* Activate test mode for overrides example

* make sure ontology is updated in both weis and wisdem before being written out

* Fix tiny typo

* Reduce SeaState nodes to reduce memory usage

---------

Co-authored-by: Garrett Barter <garrett.barter@nrel.gov>
Co-authored-by: ptrbortolotti <ptrbortolotti@gmail.com>

681 of 775 new or added lines in 18 files covered. (87.87%)

126 existing lines in 8 files now uncovered.

7949 of 13669 relevant lines covered (58.15%)

0.58 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

51.8
/weis/glue_code/runWEIS.py
1
import numpy as np
1✔
2
import os, sys
1✔
3
import openmdao.api as om
1✔
4
from weis.glue_code.gc_LoadInputs     import WindTurbineOntologyPythonWEIS
1✔
5
from wisdem.glue_code.gc_WT_InitModel import yaml2openmdao
1✔
6
from weis.glue_code.gc_PoseOptimization  import PoseOptimizationWEIS
1✔
7
from weis.glue_code.glue_code         import WindPark
1✔
8
from openmdao.utils.mpi import MPI
1✔
9
from wisdem.commonse                  import fileIO
1✔
10
from weis.glue_code.gc_ROSCOInputs    import assign_ROSCO_values
1✔
11
from weis.control.tmd                 import assign_TMD_values
1✔
12
from openfast_io.FileTools     import save_yaml
1✔
13
from wisdem.inputs.validation         import simple_types
1✔
14
from weis.glue_code.mpi_tools import compute_optimal_nP
1✔
15

16

17
# The hierarchical-communicator helpers are only importable/needed when
# running under MPI (openmdao.utils.mpi.MPI is None in serial runs).
if MPI:
    from weis.glue_code.mpi_tools import map_comm_heirarchical, subprocessor_loop, subprocessor_stop
19

20
def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options,
             geometry_override=None, modeling_override=None, analysis_override=None,
             prepMPI=False, maxnP=1):
    """Build, populate, and run the WEIS OpenMDAO problem for a wind turbine design.

    Parameters
    ----------
    fname_wt_input : str
        Path to the wind turbine geometry/ontology yaml input.
    fname_modeling_options : str
        Path to the modeling options yaml input.
    fname_opt_options : str
        Path to the analysis/optimization options yaml input.
    geometry_override : dict, optional
        Values that overwrite entries of the OpenMDAO problem after setup,
        keyed by problem variable name. If a value is smaller than the target
        array, it is written into the *end* of the array (useful for twist,
        where leading circular-section indices are not optimized).
    modeling_override : dict, optional
        Overrides applied on top of the modeling yaml before validation.
    analysis_override : dict, optional
        Overrides applied on top of the analysis yaml before validation.
    prepMPI : bool, optional
        When True, only size the problem (count design variables and OpenFAST
        calls) to compute an MPI processor layout, then return without
        assigning inputs or running the driver/model.
    maxnP : int, optional
        Upper bound on the number of processors passed to compute_optimal_nP.

    Returns
    -------
    tuple
        (wt_opt, modeling_options, opt_options). Under MPI, modeling_options
        and opt_options are broadcast from rank 0 to all ranks; wt_opt is
        only meaningful on rank 0 (set to None on other ranks before the
        final broadcasts).
    """
    # Load all yaml inputs and validate (also fills in defaults)
    wt_initial = WindTurbineOntologyPythonWEIS(
        fname_wt_input,
        fname_modeling_options,
        fname_opt_options,
        modeling_override=modeling_override,
        analysis_override=analysis_override
        )
    wt_init, modeling_options, opt_options = wt_initial.get_input_data()

    # Initialize openmdao problem. If running with multiple processors in MPI, use parallel finite differencing equal to the number of cores used.
    # Otherwise, initialize the WindPark system normally. Get the rank number for parallelization. We only print output files using the root processor.
    myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options)

    if MPI:
        if not prepMPI:
            # Processor counts were precomputed (e.g. by a previous prepMPI pass)
            # and stored in the modeling options.
            nFD = modeling_options['General']['openfast_configuration']['nFD']
            nOFp = modeling_options['General']['openfast_configuration']['nOFp']
        else:
            # Sizing pass: minimal layout, no OpenFAST worker pool.
            nFD = 1
            nOFp = 0
        # Define the color map for the cores (how these are distributed between finite differencing and openfast runs)
        if opt_options['driver']['design_of_experiments']['flag']:
            # DOE: every rank evaluates its own model; one OpenFAST core per rank.
            nFD = MPI.COMM_WORLD.Get_size()
            nOFp = 1
            rank    = MPI.COMM_WORLD.Get_rank()
            # NOTE(review): comm_map_up and comm_map_down alias the SAME dict
            # here, so the loop below populates both with {rank: [rank]}.
            # Looks deliberate (each rank maps to itself in both directions),
            # but confirm — the DOE branch skips the subprocessor loop/stop
            # below, so comm_map_down is only consumed via
            # 'mpi_comm_map_down' in the OpenFAST configuration.
            comm_map_up = comm_map_down = {}
            for r in range(MPI.COMM_WORLD.Get_size()):
                comm_map_up[r] = [r]
            color_i = 0
        else:
            nFD = max([nFD, 1])
            # Split ranks into a finite-differencing layer and OpenFAST worker
            # pools; color_map assigns each rank to a communicator color.
            comm_map_down, comm_map_up, color_map = map_comm_heirarchical(nFD, nOFp)
            rank    = MPI.COMM_WORLD.Get_rank()
            if rank < len(color_map):
                try:
                    color_i = color_map[rank]
                except IndexError:
                    # NOTE(review): unreachable given the rank < len(color_map)
                    # guard above — kept for safety/diagnostics; confirm.
                    raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(nFD))
            else:
                # Ranks beyond the color map get a spare color (idle layer).
                color_i = max(color_map) + 1
            comm_i  = MPI.COMM_WORLD.Split(color_i, 1)

    else:
        # Serial run: single "color 0" layer, rank 0 does everything.
        color_i = 0
        rank = 0

    # make the folder_output relative to the input, if it's a relative path
    analysis_input_dir = os.path.dirname(opt_options['fname_input_analysis'])
    opt_options['general']['folder_output'] = os.path.join(analysis_input_dir,opt_options['general']['folder_output'])

    folder_output = opt_options['general']['folder_output']
    if rank == 0 and not os.path.isdir(folder_output):
        os.makedirs(folder_output,exist_ok=True)

    if color_i == 0: # the top layer of cores enters, the others sit and wait to run openfast simulations
        # if MPI and opt_options['driver']['optimization']['flag']:
        if MPI:
            if modeling_options['OpenFAST']['flag'] or modeling_options['OpenFAST_Linear']['flag']:
                # Parallel settings for OpenFAST
                modeling_options['General']['openfast_configuration']['mpi_run'] = True
                modeling_options['General']['openfast_configuration']['mpi_comm_map_down'] = comm_map_down
                if opt_options['driver']['design_of_experiments']['flag']:
                    modeling_options['General']['openfast_configuration']['cores'] = 1
                else:
                    modeling_options['General']['openfast_configuration']['cores'] = nOFp

            # Parallel settings for OpenMDAO
            if opt_options['driver']['design_of_experiments']['flag']:
                wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False)
            else:
                # Parallel finite differencing: the WindPark system is wrapped
                # in a group with num_par_fd, on the split communicator.
                wt_opt = om.Problem(model=om.Group(num_par_fd=nFD), comm=comm_i, reports=False)
                wt_opt.model.add_subsystem('comp', WindPark(modeling_options = modeling_options, opt_options = opt_options), promotes=['*'])
        else:
            # Sequential finite differencing and openfast simulations
            modeling_options['General']['openfast_configuration']['mpi_run'] = False
            modeling_options['General']['openfast_configuration']['cores']   = 1
            wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False)

        # If at least one of the design variables is active, setup an optimization
        if opt_options['opt_flag']:
            wt_opt = myopt.set_driver(wt_opt)
            wt_opt = myopt.set_objective(wt_opt)
            wt_opt = myopt.set_design_variables(wt_opt, wt_init)
            wt_opt = myopt.set_constraints(wt_opt)

            if opt_options['driver']['design_of_experiments']['flag']:
                wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs']
                wt_opt.driver.options['procs_per_model'] = 1
        wt_opt = myopt.set_recorders(wt_opt)
        # NOTE(review): this unconditionally overwrites the DOE-specific
        # debug_print set just above (adding 'totals') — confirm it is meant
        # to apply to every run and not a leftover debugging line.
        wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs','totals']

        # Setup openmdao problem
        if opt_options['opt_flag']:
            wt_opt.setup()
        else:
            # If we're not performing optimization, we don't need to allocate
            # memory for the derivative arrays.
            wt_opt.setup(derivatives=False)

        # Estimate number of design variables and parallel calls to OpenFAST given
        # the computational resources available. This is used to setup WEIS for an MPI run
        if prepMPI:
            nFD = 0
            for dv in wt_opt.model.list_outputs(is_design_var=True, out_stream=None):
                # dv is a tuple with (name, info)
                nFD += len(dv[1]['val'])

            # number of finite differences should be at least 1
            nFD = max([1,nFD])

            # Compute number of processors
            modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP)

        # If WEIS is called simply to prep for an MPI call, no need to proceed and simply
        # return the number of finite differences and OpenFAST calls, and stop
        # Otherwise, keep going assigning inputs and running the OpenMDAO model/driver
        if not prepMPI:
            # Load initial wind turbine data from wt_initial to the openmdao problem
            wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options)
            wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options)
            if modeling_options['flags']['TMDs']:
                wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options)

            wt_opt = myopt.set_initial(wt_opt, wt_init)
            if modeling_options['OpenFAST']['flag']:
                wt_opt = myopt.set_initial_weis(wt_opt)

            # If the user provides values in geometry_override, they overwrite
            # whatever values have been set by the yaml files.
            # This is useful for performing black-box wrapped optimization without
            # needing to modify the yaml files.
            # Some logic is used here if the user gives a smaller size for the
            # design variable than expected to input the values into the end
            # of the array.
            # This is useful when optimizing twist, where the first few indices
            # do not need to be optimized as they correspond to a circular cross-section.
            if geometry_override is not None:
                for key in geometry_override:
                    num_values = np.array(geometry_override[key]).size
                    key_size = wt_opt[key].size
                    # Write the override into the tail of the target array.
                    idx_start = key_size - num_values
                    wt_opt[key][idx_start:] = geometry_override[key]

            # Place the last design variables from a previous run into the problem.
            # This needs to occur after the above setup() and yaml2openmdao() calls
            # so these values are correctly placed in the problem.
            wt_opt = myopt.set_restart(wt_opt)

            if 'check_totals' in opt_options['driver']['optimization']:
                if opt_options['driver']['optimization']['check_totals']:
                    wt_opt.run_model()
                    totals = wt_opt.compute_totals()

            if 'check_partials' in opt_options['driver']['optimization']:
                if opt_options['driver']['optimization']['check_partials']:
                    wt_opt.run_model()
                    checks = wt_opt.check_partials(compact_print=True)

            sys.stdout.flush()
            # Run openmdao problem
            if opt_options['opt_flag']:
                wt_opt.run_driver()
            else:
                wt_opt.run_model()

            if (not MPI) or (MPI and rank == 0):
                # Save data coming from openmdao to an output yaml file
                froot_out = os.path.join(folder_output, opt_options['general']['fname_output'])
                # Remove the fst_vt key from the dictionary and write out the modeling options
                modeling_options['General']['openfast_configuration']['fst_vt'] = {}
                if not modeling_options['OpenFAST']['from_openfast']:
                    wt_initial.update_ontology(wt_opt)
                wt_initial.write_outputs(froot_out)

                # openMDAO doesn't save constraint values, so we get them from this construction
                problem_var_dict = wt_opt.list_driver_vars(
                    desvar_opts=["lower", "upper",],
                    cons_opts=["lower", "upper", "equals",],
                )
                save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict))

                # Save data to numpy and matlab arrays
                fileIO.save_data(froot_out, wt_opt)

    if MPI and \
            (modeling_options['OpenFAST']['flag'] or modeling_options['OpenFAST_Linear']['flag']) and \
            (not opt_options['driver']['design_of_experiments']['flag']):
        # subprocessor ranks spin, waiting for FAST simulations to run.
        sys.stdout.flush()
        if rank in comm_map_up.keys():
            subprocessor_loop(comm_map_up)
        sys.stdout.flush()

        # close signal to subprocessors
        subprocessor_stop(comm_map_down)
        sys.stdout.flush()

    # Send each core in use to a barrier synchronization
    # Next, share WEIS outputs across all processors from rank=0 (root)
    if MPI:
        MPI.COMM_WORLD.Barrier()
        rank = MPI.COMM_WORLD.Get_rank()
        if rank != 0:
            # Only rank 0's results are authoritative; drop local copies
            # before the broadcasts below.
            wt_opt = None
            modeling_options = None
            opt_options = None

        # MPI.COMM_WORLD.bcast cannot broadcast out a full OpenMDAO problem
        # We don't need it for now, but this might become an issue if we start
        # stacking multiple WEIS calls on top of each other and we need to
        # reuse wt_opt from one call to the next
        modeling_options = MPI.COMM_WORLD.bcast(modeling_options, root = 0)
        opt_options = MPI.COMM_WORLD.bcast(opt_options, root = 0)
        MPI.COMM_WORLD.Barrier()

    return wt_opt, modeling_options, opt_options
240

STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc