• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

WISDEM / WEIS / 9919956665

13 Jul 2024 12:34PM UTC coverage: 79.574% (+8.7%) from 70.904%
9919956665

Pull #289

github

gbarter
use the OpenMDAO approach to the problem_vars.json output
Pull Request #289: Release with improved installation

163 of 521 new or added lines in 17 files covered. (31.29%)

707 existing lines in 19 files now uncovered.

21618 of 27167 relevant lines covered (79.57%)

0.8 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/weis/visualization/utils.py
1
'''
2
Various functions for help visualizing WEIS outputs
3
'''
NEW
4
from weis.aeroelasticse.FileTools import load_yaml
×
NEW
5
import pandas as pd
×
NEW
6
import numpy as np
×
NEW
7
import openmdao.api as om
×
NEW
8
import glob
×
NEW
9
import json
×
NEW
10
import multiprocessing as mp
×
11

NEW
12
try:
×
NEW
13
    import ruamel_yaml as ry
×
NEW
14
except Exception:
×
NEW
15
    try:
×
NEW
16
        import ruamel.yaml as ry
×
NEW
17
    except Exception:
×
NEW
18
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')
×
19

NEW
20
def read_cm(cm_file):
    """
    Read a WEIS case-matrix yaml file into a pandas DataFrame.

    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : str
        The file path for the case matrix yaml file

    Returns
    -------
    cm : pd.DataFrame
        The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    # ruamel can return sequence-valued keys; convert those to hashable
    # tuples so they can be used as DataFrame column labels
    cnames = [
        tuple(c) if isinstance(c, ry.comments.CommentedKeySeq) else c
        for c in cm_dict.keys()
    ]
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm
44

NEW
45
def parse_contents(data):
    """
    Convert a dictionary of recorded output lists into a pandas DataFrame.

    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py

    Parameters
    ----------
    data : dict
        mapping from output names to per-iteration lists of recorded values

    Returns
    -------
    df : pd.DataFrame
        one column per key, one row per recorded entry
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                # single-element entries are unwrapped to scalars when possible;
                # narrowed from a bare except, which also swallowed
                # SystemExit/KeyboardInterrupt
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except (TypeError, IndexError, KeyError):
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df
69

70

NEW
71
def load_vars_file(fn_vars):
    """
    Load a json file of problem variables as output from WEIS (problem_vars.json).

    parameters:
    -----------
    fn_vars: str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    with open(fn_vars, "r") as fjson:
        raw = json.load(fjson)
    # re-pack each top-level entry as a plain dict for convenient access
    problem_vars = {key: dict(value) for key, value in raw.items()}
    return problem_vars
90

91

NEW
92
def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    Compare openmdao data dictionaries to find the in-common (and not) keys.

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between second and first OM data dicts
    """

    set_1 = set(dataOM_1)
    set_2 = set(dataOM_2)
    diff_keys_12 = set_1 - set_2
    diff_keys_21 = set_2 - set_1
    keys_all = set_1 & set_2

    if verbose:
        # report keys unique to each input, then the shared keys
        if diff_keys_12:
            print(f"the following keys are only in {fn_1}:")
            for key_m in diff_keys_12:
                print(f"\t{key_m}")
        if diff_keys_21:
            print(f"the following keys are only in {fn_2}:")
            for key_m in diff_keys_21:
                print(f"\t{key_m}")
        print(f"the following keys are in both {fn_1} and {fn_2}:")
        for key_m in keys_all:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21
145

146

NEW
147
def load_OMsql(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
        log : str
            filename of the log sql database that should be loaded
        parse_multi : bool
            switch to turn on rank/iteration parsing and storage
        meta : str
            filename of the meta log sql database that should be loaded
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        rec_data: dict
            dictionary of the data recorded by openMDAO

    raises:
        ValueError: if parse_multi is set and a case name does not have the
            expected "rank<N>:...|<iter>" form
    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            # case names look like "rank<N>:<driver>|<iteration>"
            rankNo = case.name.split(":")[0]
            if not rankNo.startswith("rank"):
                # explicit raise instead of an assert: asserts are stripped
                # when python runs with -O, silently skipping the check
                raise ValueError(f"unexpected case name format: {case.name}")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():

            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []
            if len(case[key]) == 1:
                # single-element outputs are coerced to float
                rec_data[key].append(float(case[key]))
            else:
                # multi-element outputs are stored as numpy arrays
                rec_data[key].append(np.array(case[key]))

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:  # for each metadata key
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data  # return the output
212

213

NEW
214
def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi = True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string; logs sort by
    # their numeric "_<rank>" suffix, with the non-numeric "meta" log pushed
    # to the end via a large sentinel key
    opt_logs = sorted(
        glob.glob(log_fmt),
        key = lambda v : int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        # NOTE(review): FileNotFoundError would be more conventional here;
        # the raised type is preserved in case existing callers catch it
        raise FileExistsError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # resolve which meta logfile (if any) to hand to the case reader
    if meta_in is not None:
        meta = meta_in  # if a meta is given, override
    elif meta_found is not None:
        meta = meta_found  # if a meta is not given but one is found, use that
    else:
        meta = None  # otherwise, run without a meta

    # extract the ranks from the sql files (trailing "_<rank>" suffix)
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # load the per-rank sql files, in parallel if requested
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load sql file; positional args are (log, parse_multi, meta, verbose)
        outdata = pool.starmap(load_OMsql, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else: # no multiprocessing
        outdata = [load_OMsql(log, parse_multi=True, verbose=verbose, meta=meta) for log in opt_logs]

    # create a dictionary and turn it into a dataframe for convenience
    collected_data = {}
    ndarray_keys = []  # tracks which keys held array-valued entries
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank": # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    # single-element arrays are unwrapped to plain floats
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")
308

309

NEW
310
def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
    metric_key="floatingse.system_structural_mass",
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict:
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis
        metric_key : str (optional)
            column whose per-iteration minimum selects the "best" feasible
            simulation; default preserves the previously hard-coded
            "floatingse.system_structural_mass"
    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """

    dfOMmulti = pd.DataFrame(dataOMmulti)
    # mask down to the rows that satisfy all constraints within tolerance
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    # within each optimizer iteration, keep the row minimizing the metric
    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp : grp.loc[grp[metric_key].idxmin()],
        include_groups=False,
    ).to_dict()

    # flatten the nested {column: {iter: value}} dicts into arrays
    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE
348

349

NEW
350
def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict:
            experiment design variables for checking
        feas_tol : float (optional)
            relative tolerance applied to the constraint bounds

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise feasibility indications for that constraint
    """

    # single-objective assumption: the first objective sizes the masks
    objective_name = list(vars_dict["objectives"].values())[0]["name"]
    objective_column = np.array(dataOM[objective_name]).reshape(-1, 1)

    feasibility_constraintwise = {}
    total_feasibility = np.ones_like(objective_column, dtype=bool)

    for constraint in vars_dict["constraints"].values():
        values = np.array(dataOM[constraint["name"]])
        if values.ndim == 1:
            values = values.reshape(-1, 1)

        feasibility = np.ones((objective_column.shape[0], 1), dtype=bool)
        upper = constraint.get("upper")
        lower = constraint.get("lower")
        if upper is not None:
            # every entry of the constraint must sit below the (relaxed) upper bound
            feasibility = np.logical_and(
                feasibility,
                np.all(values <= (1 + feas_tol) * upper, axis=1).reshape(-1, 1),
            )
        if lower is not None:
            # ... and above the (relaxed) lower bound
            feasibility = np.logical_and(
                feasibility,
                np.all(values >= (1 - feas_tol) * lower, axis=1).reshape(-1, 1),
            )

        feasibility_constraintwise[constraint["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)

    return total_feasibility, feasibility_constraintwise
388

389

NEW
390
def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies format of DVs, constraints, objective variable file
    guarantees a list of experiments has the same variables
    adjusts unbounded constraints
    returns verified list of vars
    """

    # pairwise-verify each additional experiment against the first;
    # mismatched structure or differing values trip an AssertionError
    for vars_2 in vars_i:
        if vars_2 is None:
            continue
        for k0 in set(vars_1.keys()).union(vars_2):
            assert k0 in vars_1
            assert k0 in vars_2
            for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                assert k1 in vars_1[k0]
                assert k1 in vars_2[k0]
                for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                    assert k2 in vars_1[k0][k1]
                    assert k2 in vars_2[k0][k1]
                    if k2 == "val":
                        continue  # recorded values may legitimately differ
                    value_1 = vars_1[k0][k1][k2]
                    value_2 = vars_2[k0][k1][k2]
                    if isinstance(value_1, str):
                        assert value_1 == value_2
                    elif value_1 is not None:
                        assert np.all(np.isclose(value_1, value_2))
                    else:
                        assert (value_1 is None) and (value_2 is None)

    # clamp effectively-unbounded constraint limits to +/- infinity
    # NOTE(review): .copy() is shallow — nested dicts are shared with vars_1
    vars_unified = vars_1.copy()
    for group in vars_unified.values():
        for entry in group.values():
            lower = entry.get("lower")
            upper = entry.get("upper")
            if (lower is not None) and (lower < -1e28):
                entry["lower"] = -np.inf
            if (upper is not None) and (upper > 1e28):
                entry["upper"] = np.inf

    return vars_unified
430

431

NEW
432
def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or unknown
    (??)

    parameters:
    -----------
        keys_all : iterable
            variable names to display
        keys_obj : collection
            objective variable names
        keys_DV : collection
            design variable names
        keys_constr : collection
            constraint variable names
    """

    # print them nicely: a plain loop replaces the original list
    # comprehension, which was used only for its side effects
    print()
    for key in keys_all:
        if key in keys_obj:
            prefix = "**"
        elif key in keys_DV:
            prefix = "--"
        elif key in keys_constr:
            prefix = "<>"
        else:
            prefix = "??"
        print(f"{prefix} {key}")
    print()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc