
WISDEM / WEIS / build 16309547368

16 Jul 2025 03:21AM UTC coverage: 58.865% (-1.6%) from 60.46%
Build 16309547368 (push, github, web-flow): Merge pull request #409 from WISDEM/develop (WEIS v1.6)

605 of 817 new or added lines in 20 files covered (74.05%)
525 existing lines in 10 files now uncovered
8094 of 13750 relevant lines covered (58.87%), 0.59 hits per line

Source file: /weis/visualization/utils.py (28.54% covered)
'''
Various helper functions for visualizing WEIS outputs
'''
from openfast_io.FileTools import load_yaml
import weis.inputs as sch
import pandas as pd
import numpy as np
import openmdao.api as om
import glob
import json
import multiprocessing as mp
import plotly.graph_objects as go
import os
import io
import yaml
import re
import socket
from dash import html
#from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib
import pickle
import raft
#from raft.helpers import *

import vtk
import dash_vtk
from dash_vtk.utils import to_mesh_state
import pyvista as pv
import plotly

try:
    import ruamel_yaml as ry
except Exception:
    try:
        import ruamel.yaml as ry
    except Exception:
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')

try:
    from vtkmodules.util.numpy_support import numpy_to_vtk, vtk_to_numpy
except ImportError:
    from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy

def checkPort(port, host="0.0.0.0"):
    # check port availability, then close the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = False
    try:
        sock.bind((host, port))
        result = True
    except OSError:
        result = False

    sock.close()
    return result

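# Example usage (illustrative; 8050 is just a typical Dash port):
#   if checkPort(8050):
#       print('port 8050 is available')
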
def parse_yaml(file_path):
    '''
    Parse the yaml file contents into dictionary format
    '''
    # TODO: Encountering an error when parsing hyperlink data - either skip or add that?
    #       load_yaml doesn't work as well..
    try:
        # data = load_yaml(file_path, 1)
        with io.open(file_path, 'r') as stream:
            data = yaml.safe_load(stream)
            # try:
            #     data = yaml.safe_load(stream)
            # except yaml.YAMLError as exc:
            #     from subprocess import call
            #     call(["yamllint", "-f", "parsable", file_path])

        data['yamlPath'] = file_path
        return data

    except FileNotFoundError:
        print('Could not locate the input yaml file..')
        exit()

    except Exception as e:
        print(e)
        exit()

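# Example usage (illustrative; the filename is hypothetical):
#   config = parse_yaml('modeling_options.yaml')
#   print(config['yamlPath'])
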
def dict_to_html(data, out_html_list, level):
    '''
    Render the nested dictionary data as a list of html components
    '''

    for k1, v1 in data.items():
        if k1 not in ['dirs', 'files']:
            if not isinstance(v1, list) and not isinstance(v1, dict):
                out_html_list.append(html.H6(f'{"---"*level}{k1}: {v1}'))
                continue

            out_html_list.append(html.H6(f'{"---"*level}{k1}'))

        if isinstance(v1, list):
            out_html_list.append(html.Div([
                                    html.H6(f'{"---"*(level+1)}{i}') for i in v1]))

        elif isinstance(v1, dict):
            out_html_list = dict_to_html(v1, out_html_list, level+1)

    return out_html_list

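# Example usage (illustrative sketch):
#   children = dict_to_html({'run': {'status': 'done'}}, [], level=0)
#   layout = html.Div(children)
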
def read_cm(cm_file):
    """
    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : The file path for the case matrix

    Returns
    -------
    cm : The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    cnames = []
    for c in list(cm_dict.keys()):
        if isinstance(c, ry.comments.CommentedKeySeq):
            cnames.append(tuple(c))
        else:
            cnames.append(c)
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm

def parse_contents(data):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except Exception:
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df

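# Example usage (illustrative; the keys mimic recorded outputs):
#   df = parse_contents({'x': [[1.0], [2.0]], 'y': [[0.5], [0.7]]})
#   print(df)
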
def load_vars_file(fn_vars):
    """
    load a json file of problem variables as output from WEIS (as problem_vars.json)

    parameters:
    -----------
    fn_vars: str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    with open(fn_vars, "r") as fjson:
        # unpack in a useful form
        vars = {k: dict(v) for k, v in json.load(fjson).items()}
    return vars

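# Example usage (illustrative; problem_vars.json is the default WEIS output name):
#   vars_dict = load_vars_file('problem_vars.json')
#   print(vars_dict.keys())   # typically includes 'objectives' and 'constraints'
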
def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    compare openmdao data dictionaries to find the in-common (and not) keys

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between second and first OM data dicts
    """

    diff_keys_12 = set(dataOM_1).difference(dataOM_2)
    diff_keys_21 = set(dataOM_2).difference(dataOM_1)
    keys_all = set(dataOM_1).intersection(dataOM_2)

    if verbose:
        if diff_keys_12:
            print(f"the following keys are only in {fn_1}:")
            for key_m in diff_keys_12:
                print(f"\t{key_m}")
        if diff_keys_21:
            print(f"the following keys are only in {fn_2}:")
            for key_m in diff_keys_21:
                print(f"\t{key_m}")
        print(f"the following keys are in both {fn_1} and {fn_2}:")
        for key_m in keys_all:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21

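# Example usage (illustrative toy dictionaries):
#   common, only_1, only_2 = compare_om_data({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   # common == {'b'}, only_1 == {'a'}, only_2 == {'c'}
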
def load_OMsql(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
        log : str
            filename of the log sql database that should be loaded
        parse_multi : bool
            switch to turn on rank/iteration parsing and storage
        meta : str
            filename of the meta log sql database that should be loaded
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        rec_data: dict
            dictionary of the data recorded by openMDAO
    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            rankNo = case.name.split(":")[0]
            assert rankNo.startswith("rank")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():

            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []

            if hasattr(case[key], '__len__') and len(case[key]) != 1:
                # convert to a numpy array if possible and add the data to the list
                rec_data[key].append(np.array(case[key]))
            else:
                rec_data[key].append(case[key])

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:  # for each key in the outputs
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data  # return the output

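# Example usage (illustrative; the log filename is hypothetical):
#   rec_data = load_OMsql('log_opt.sql', verbose=True)
#   print(list(rec_data.keys()))
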
def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi=True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string
    opt_logs = sorted(
        glob.glob(log_fmt),
        key=lambda v: int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        raise FileNotFoundError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # handle meta logfile discovery
    if meta_in is not None:
        meta = meta_in  # if a meta is given, override
    elif meta_found is not None:
        meta = meta_found  # if a meta is not given but one is found, use that
    else:
        meta = None  # otherwise, run without a meta

    # extract the ranks from the sql files
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # run multiprocessing
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load sql file
        outdata = pool.starmap(load_OMsql, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else:  # no multiprocessing
        outdata = [load_OMsql(log, parse_multi=True, verbose=verbose, meta=meta) for log in opt_logs]

    # create a dictionary and turn it into a dataframe for convenience
    collected_data = {}
    ndarray_keys = []
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank":  # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                    # try:
                    #     collected_data[key].append(np.array(data[key][idx_key][0]))
                    # except:
                    #     collected_data[key].append(np.array(data[key][idx_key]))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")

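# Example usage (illustrative; the glob pattern is hypothetical and would match
# per-rank logs such as log_opt.sql_0, log_opt.sql_1, ..., plus log_opt.sql_meta):
#   data_dict = load_OMsql_multi('log_opt.sql_*')
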
def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict:
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis

    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """

    dfOMmulti = pd.DataFrame(dataOMmulti)
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp: grp.loc[grp["floatingse.system_structural_mass"].idxmin()],
        include_groups=False,
    ).to_dict()

    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE

def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict:
            experiment design variables for checking
        feas_tol : float (optional)
            tolerance for feasibility analysis

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise feasibility indications for that constraint
    """

    # assert len(vars_dict["objectives"].values()) == 1, "can't handle multi-objective... yet. -cfrontin"
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    feasibility_constraintwise = dict()
    total_feasibility = np.ones_like(np.array(dataOM[objective_name]).reshape(-1, 1), dtype=bool)
    for k, v in vars_dict["constraints"].items():
        feasibility = np.ones_like(dataOM[objective_name], dtype=bool).reshape(-1, 1)
        values = np.array(dataOM[v["name"]])
        if len(values.shape) == 1:
            values = values.reshape(-1, 1)
        if v.get("upper") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.less_equal(values, (1+feas_tol)*v["upper"]), axis=1).reshape(-1, 1))
        if v.get("lower") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.greater_equal(values, (1-feas_tol)*v["lower"]), axis=1).reshape(-1, 1))
        feasibility_constraintwise[v["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)
    return total_feasibility, feasibility_constraintwise

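# Example vars_dict layout assumed by this function (illustrative sketch; the
# variable names are borrowed from elsewhere in this module):
#   vars_dict = {
#       'objectives': {'obj': {'name': 'floatingse.system_structural_mass'}},
#       'constraints': {'c1': {'name': 'raft.Max_PtfmPitch', 'upper': 6.0}},
#   }
#   tfeas, cfeas = get_feasible_iterations(dataOM, vars_dict)
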
def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies format of DVs, constraints, objective variable file
    guarantees a list of experiments has the same variables
    adjusts unbounded constraints
    returns verified list of vars
    """

    for vars_2 in vars_i:
        if vars_2 is not None:
            for k0 in set(vars_1.keys()).union(vars_2):
                assert k0 in vars_1
                assert k0 in vars_2
                for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                    assert k1 in vars_1[k0]
                    assert k1 in vars_2[k0]
                    for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                        assert k2 in vars_1[k0][k1]
                        assert k2 in vars_2[k0][k1]
                        if k2 == "val":
                            continue
                        if isinstance(vars_1[k0][k1][k2], str):
                            assert vars_1[k0][k1][k2] == vars_2[k0][k1][k2]
                        elif vars_1[k0][k1][k2] is not None:
                            assert np.all(np.isclose(vars_1[k0][k1][k2], vars_2[k0][k1][k2]))
                        else:
                            assert (vars_1[k0][k1][k2] is None) and (vars_2[k0][k1][k2] is None)

    vars_unified = vars_1.copy()
    for k0 in vars_unified.keys():
        for k1 in vars_unified[k0].keys():
            if (vars_unified[k0][k1].get("lower") is not None) and (vars_unified[k0][k1].get("lower") < -1e28):
                vars_unified[k0][k1]["lower"] = -np.inf
            if (vars_unified[k0][k1].get("upper") is not None) and (vars_unified[k0][k1].get("upper") > 1e28):
                vars_unified[k0][k1]["upper"] = np.inf

    return vars_unified

def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or unknown
    (??)
    """

    # print them nicely
    print()
    for key in keys_all:
        if key in keys_obj:
            print(f"** {key}")
        elif key in keys_DV:
            print(f"-- {key}")
        elif key in keys_constr:
            print(f"<> {key}")
        else:
            print(f"?? {key}")
    print()

def read_per_iteration(iteration, stats_paths):

    stats_path_matched = [x for x in stats_paths if f'iteration_{iteration}' in x][0]
    iteration_path = '/'.join(stats_path_matched.split('/')[:-1])
    stats = pd.read_pickle(stats_path_matched)
    # dels = pd.read_pickle(iteration_path+'/DELs.p')
    # fst_vt = pd.read_pickle(iteration_path+'/fst_vt.p')
    print('iteration path with ', iteration, ': ', stats_path_matched)

    return stats, iteration_path

def get_timeseries_data(run_num, stats, iteration_path):

    stats = stats.reset_index()     # make 'index' column that has elements of 'IEA_22_Semi_00, ...'
    filename_from_stats = stats.loc[run_num, 'index'].to_string()      # filenames don't match - stats: IEA_22_Semi_83 / timeseries/: IEA_22_Semi_0_83.p

    # TODO: Need to clean this up later with a unified format..
    if filename_from_stats.split('_')[-1].startswith('0'):
        filename = ('_'.join(filename_from_stats.split('_')[:-1])+'_0_'+filename_from_stats.split('_')[-1][1:]+'.p').strip()
    else:
        filename = ('_'.join(filename_from_stats.split('_')[:-1])+'_0_'+filename_from_stats.split('_')[-1]+'.p').strip()

    if not os.path.exists('/'.join([iteration_path, 'timeseries', filename])):
        # examples/17_IEA22_Optimization/17_IEA22_OptStudies/of_COBYLA/openfast_runs/iteration_0/timeseries/IEA_22_Semi_0.p
        filename = ('_'.join(filename_from_stats.split('_')[2:-1])+'_'+str(int(filename_from_stats.split('_')[-1]))+'.p').strip()

    timeseries_path = '/'.join([iteration_path, 'timeseries', filename])
    timeseries_data = pd.read_pickle(timeseries_path)

    return filename, timeseries_data

def empty_figure():
    '''
    Draw an empty figure showing nothing once initialized
    '''
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(template=None)
    fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)
    fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)

    return fig

def toggle(click, is_open):
    if click:
        return not is_open
    return is_open

def store_dataframes(var_files):
    dfs = {}
    for _, file_path in var_files.items():
        df = pd.read_csv(file_path, skiprows=[0,1,2,3,4,5,7], sep=r'\s+')
        dfs[file_path] = df.to_dict('records')

    return dfs

def get_file_info(file_path):
    file_name = file_path.split('/')[-1]
    file_abs_path = os.path.abspath(file_path)
    file_size = round(os.path.getsize(file_path) / (1024**2), 2)    # size in MB
    creation_time = os.path.getctime(file_path)
    modification_time = os.path.getmtime(file_path)

    file_info = {
        'file_name': file_name,
        'file_abs_path': file_abs_path,
        'file_size': file_size,
        'creation_time': creation_time,
        'modification_time': modification_time
    }

    return file_info

def find_file_path_from_tree(nested_dict, filename, prepath=()):
    # Works for multi-keyed files
    # Sample outputs: ('outputDirStructure', 'sample_test') ('outputDirStructure', 'sample_multi')
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == filename:
            yield path + (v, )
        elif isinstance(v, list) and filename in v:
            yield path + (filename, )
        elif hasattr(v, 'items'):
            yield from find_file_path_from_tree(v, filename, path)

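# Example usage (illustrative nested directory tree):
#   tree = {'outputDirStructure': {'dirs': {}, 'files': ['log_opt.sql']}}
#   list(find_file_path_from_tree(tree, 'log_opt.sql'))
#   # -> [('outputDirStructure', 'files', 'log_opt.sql')]
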
def find_iterations(nested_dict, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if 'iteration' in k:
            yield int(re.findall(r'\d+', k)[0])
        elif hasattr(v, 'items'):
            yield from find_iterations(v, path)

def update_yaml(input_dict, yaml_filepath):
    with open(yaml_filepath, 'w') as outfile:
        yaml.dump(input_dict, outfile, default_flow_style=False)

def read_cost_variables(labels, refturb_variables):
    # Read tcc cost-related variables from the CSV file

    cost_matrix = [['Main Turbine Components', 'Cost']]

    for l in labels:
        cost_matrix.append([l, eval(refturb_variables[f'tcc.{l}_cost']['values'])[0]])

    return cost_matrix

def convert_dict_values_to_list(input_dict):
    return {k: [v.tolist()] if isinstance(v, np.ndarray) else v for k, v in input_dict.items()}

def generate_raft_img(raft_design_dir, plot_dir, log_data):
    '''
    Temporary function to visualize the RAFT 3d plot using matplotlib.
    TODO: build an interactive 3d plot using plotly
    '''
    os.makedirs(plot_dir, exist_ok=True)

    if isinstance(log_data, list):
        log_data = convert_dict_values_to_list(log_data[0])

    opt_outs = {}
    opt_outs['max_pitch'] = np.squeeze(np.array(log_data['raft.Max_PtfmPitch']))
    n_plots = opt_outs['max_pitch'].size     # Changed from len(opt_outs['max_pitch']) to handle single-element np.array values
    print('n_plots: ', n_plots)

    matplotlib.use('agg')
    for i_plot in range(n_plots):
        # Set up subplots
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        ax = plt.axes(projection='3d')

        with open(os.path.join(raft_design_dir, f'raft_design_{i_plot}.pkl'), 'rb') as f:
            design = pickle.load(f)

        # TODO: Found typo on the gamma value in the 1_raft_opt example
        if design['turbine']['tower']['gamma'] == np.array([0.]):
            design['turbine']['tower']['gamma'] = 0.0       # Change it from array([0.])

        # set up the model
        model1 = raft.Model(design)
        model1.analyzeUnloaded(
            ballast=False,
            heave_tol=1.0
            )

        model1.fowtList[0].r6[4] = np.radians(opt_outs['max_pitch'][i_plot])

        _, ax = model1.plot(ax=ax)

        ax.azim = -88.63636363636361
        ax.elev = 27.662337662337674
        ax.set_xlim3d((-110.90447789470043, 102.92063063344857))
        ax.set_ylim3d((64.47420067304586, 311.37818252335893))
        ax.set_zlim3d((-88.43591080818854, -57.499893019459606))

        image_filename = os.path.join(plot_dir, f'ptfm_{i_plot}.png')
        plt.savefig(image_filename, bbox_inches='tight')
        print('saved ', image_filename)
        plt.close()

def remove_duplicated_legends(fig):
    names = set()
    fig.for_each_trace(
        lambda trace:
            trace.update(showlegend=False)
            if (trace.name in names) else names.add(trace.name))

    return fig

def set_colors():
    cols = plotly.colors.DEFAULT_PLOTLY_COLORS
    return cols


############################
# Viz Utils for WindIO
############################
def render_meshes():
    meshes = []

    cylinder = {'center': [1,2,3], 'direction': [1,1,1], 'radius': 1, 'height': 2}
    sphere = {'center': [0,0,0], 'direction': [0,0,1], 'radius': 0.5}
    plane = {'center': [0,0,0], 'direction': [0,0,1]}
    line = {'pointa': [-0.5,0,0], 'pointb': [0.5,0,0]}
    box = {'bounds': [-1.0,1.0,-1.0,1.0,-1.0,1.0]}

    # Define structured points with numpy
    x = np.arange(-10, 10, 0.25)    # (80,)
    y = np.arange(-10, 10, 0.25)    # (80,)
    x, y = np.meshgrid(x, y)        # both (80, 80)
    r = np.sqrt(x**2 + y**2)
    z = np.sin(r)
    points = (x, y, z)

    mesh_cylinder = render_cylinder(cylinder)
    mesh_sphere = render_sphere(sphere)
    mesh_plane = render_plane(plane)
    mesh_line = render_line(line)
    mesh_box = render_box(box)
    mesh_random = render_our_own(points)

    meshes.append(mesh_cylinder)
    meshes.append(mesh_sphere)
    meshes.append(mesh_plane)
    meshes.append(mesh_line)
    meshes.append(mesh_box)
    meshes.append(mesh_random)

    return meshes

def render_cylinder(cylinder):
    cylinder = pv.Cylinder(
        center=cylinder['center'], direction=cylinder['direction'], radius=cylinder['radius'], height=cylinder['height']
    )
    mesh_state = to_mesh_state(cylinder)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_sphere(sphere):
    sphere = pv.Sphere(
        center=sphere['center'], direction=sphere['direction'], radius=sphere['radius']
    )
    mesh_state = to_mesh_state(sphere)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_plane(plane):
    plane = pv.Plane(
        center=plane['center'], direction=plane['direction']
    )
    mesh_state = to_mesh_state(plane)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_line(line):
    line = pv.Line(
        pointa=line['pointa'], pointb=line['pointb']
    )
    mesh_state = to_mesh_state(line)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_box(box):
    box = pv.Box(
        bounds=box['bounds']
    )
    mesh_state = to_mesh_state(box)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_our_own(points):
    '''
    Create and fill the VTK Data Object with your own data using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    https://docs.pyvista.org/examples/00-load/create-tri-surface
    https://docs.pyvista.org/api/core/_autosummary/pyvista.polydatafilters.reconstruct_surface#pyvista.PolyDataFilters.reconstruct_surface
    '''

    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)
    cloud = pv.PolyData(coords)
    # mesh = cloud.delaunay_2d()          # From the point cloud, apply a 2D Delaunay filter to generate a 2d surface from a set of points on a plane.
    mesh = cloud.delaunay_3d()


    # Works for the sin-plane but not for the cylinder..
    '''
    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)

    points = vtk.vtkPoints()
    points.SetData(coords)

    grid = vtk.vtkStructuredGrid()
    grid.SetDimensions(*z.shape, 1)     # *z.shape: (80 80) for sin-plane / 1000 for cylinder
    grid.SetPoints(points)

    # Add point data
    arr = numpy_to_vtk(z.ravel())
    arr.SetName("z")
    grid.GetPointData().SetScalars(arr)
    '''

    mesh_state = to_mesh_state(mesh)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            mapper={'orientationArray': 'Normals'},
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_our_own_delaunay(points):
    '''
    Create and fill the VTK Data Object with your own data using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    https://docs.pyvista.org/examples/00-load/create-tri-surface
    https://docs.pyvista.org/api/core/_autosummary/pyvista.polydatafilters.reconstruct_surface#pyvista.PolyDataFilters.reconstruct_surface
    '''

    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)
    cloud = pv.PolyData(coords)
    # mesh = cloud.delaunay_2d()          # From the point cloud, apply a 2D Delaunay filter to generate a 2d surface from a set of points on a plane.
    mesh = cloud.delaunay_3d()

    mesh_state = to_mesh_state(mesh)

    return mesh_state

def find_rows(df_dict, df_type='geometry'):

    df = pd.DataFrame(df_dict)

    return df[df['Type'] == df_type].to_dict('list')         # {'File Path': ['a.yaml', 'c.yaml'], 'Label': ['abc', 'ghi'], 'Type': [1, 1]}

    # return {k: v[idx] for idx in range(len(df['Type'])) for k, v in df.items() if df['Type'][idx]==df_type}

def load_geometry_data(geometry_paths):
    # Read the geometry input file and load the airfoils data
    # wt_options = sch.load_geometry_yaml('/projects/weis/sryu/visualization_cases/1_raft_opt/IEA-22-280-RWT.yaml')       # For HPC
    # wt_options = sch.load_geometry_yaml('/Users/sryu/Desktop/FY24/WEIS-Visualization/data/visualization_cases/1_raft_opt/IEA-22-280-RWT.yaml')       # For Local
    airfoils, geom_comps, wt_options_by_file = {}, {}, {}
    # for row in geometry_paths:
    #     wt_options = sch.load_geometry_yaml(row['File Path'])
    #     airfoils[row['Label']] = wt_options['airfoils']
    #     geom_comps[row['Label']] = wt_options['components']

    for filepath, filelabel, _ in zip(*geometry_paths.values()):
        wt_options = sch.load_geometry_yaml(filepath)
        airfoils[filelabel] = wt_options['airfoils']
        geom_comps[filelabel] = wt_options['components']
        wt_options_by_file[filelabel] = wt_options

    return airfoils, geom_comps, wt_options_by_file


###################################################
# Not needed below.. Will be deleted later
###################################################
def load_mesh(file_path):
    '''
    Read either an STL or VTK file and load the 3D mesh
    '''
    # Read the STL file generated by the WindIO Tool
    # Sample files to test
    # file_path = '/Users/sryu/Desktop/FY24/windio2cad/nrel5mw-semi_oc4.stl'
    # file_path = '/Users/sryu/Desktop/FY24/ACDC/project1/case01/vtk/01_NREL_5MW-ED.Mode1.LinTime1.ED_BladeLn2Mesh_motion1.010.vtp'
    if file_path.endswith('.stl'):
        reader = vtk.vtkSTLReader()
    elif file_path.endswith('.vtp'):
        reader = vtk.vtkXMLPolyDataReader()
    else:
        raise ValueError(f'Unsupported mesh file type: {file_path}')

    reader.SetFileName(file_path)
    reader.Update()

    # Get the dataset and build a mesh structure to pass it as a Mesh
    dataset = reader.GetOutput()
    mesh_state = to_mesh_state(dataset)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation([
            dash_vtk.Mesh(state=mesh_state)
        ])
    ])

    return content

def render_terrain():
    '''
    This is an example of VTK rendering of a point cloud. Only for reference.. Will be deleted later.
    '''
    # Get point cloud data from PyVista
    uniformGrid = pv.examples.download_crater_topo()
    subset = uniformGrid.extract_subset((500, 900, 400, 800, 0, 0), (5, 5, 1))

    # Update warp
    terrain = subset.warp_by_scalar(factor=1)
    polydata = terrain.extract_geometry()
    points = polydata.points.ravel()                        # points: [1.81750e+06 5.64600e+06 1.55213e+03 ... 1.82350e+06 5.65200e+06 1.91346e+03] / shape is (19683,) with dtype=float32
    polys = vtk_to_numpy(polydata.GetPolys().GetData())     # polys: [   4    0    1 ... 6479 6560 6559] / shape is (32000,) with dtype=int64
    elevation = polydata["scalar1of1"]
    color_range = [np.amin(elevation), np.amax(elevation)]

    content = dash_vtk.View(
        pickingModes=["hover"],
        children=[
            dash_vtk.GeometryRepresentation(
                id="vtk-representation",
                children=[
                    dash_vtk.PolyData(
                        id="vtk-polydata",
                        points=points,
                        polys=polys,
                        children=[
                            dash_vtk.PointData(
                                [
                                    dash_vtk.DataArray(
                                        id="vtk-array",
                                        registration="setScalars",
                                        name="elevation",
                                        values=elevation,
                                    )
                                ]
                            )
                        ],
                    )
                ],
                colorMapPreset="erdc_blue2green_muted",
                colorDataRange=color_range,
                property={"edgeVisibility": True},
                showCubeAxes=True,
                cubeAxesStyle={"axisLabels": ["", "", "Altitude"]},
            ),
            dash_vtk.GeometryRepresentation(
                id="pick-rep",
                actor={"visibility": False},
                children=[
                    dash_vtk.Algorithm(
                        id="pick-sphere",
                        vtkClass="vtkSphereSource",
                        state={"radius": 100},
                    )
                ],
            ),
        ],
    )

    return content

def render_volume():
    '''
    This is an example of a random volume generation. Only for reference.. Will be deleted later.
    '''
    import random

    content = dash_vtk.View(
        children=dash_vtk.VolumeDataRepresentation(
            spacing=[1, 1, 1],
            dimensions=[10, 10, 10],
            origin=[0, 0, 0],
            scalars=[random.random() for _ in range(1000)],
            rescaleColorMap=False,
        )
    )

    return content

def render_mesh_with_grid():
    '''
    Create and fill the VTK Data Object with your own data using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    '''
    # Define structured points with numpy
    x = np.arange(-10, 10, 0.25)    # (80,)
    y = np.arange(-10, 10, 0.25)    # (80,)
    x, y = np.meshgrid(x, y)        # both (80, 80)
    r = np.sqrt(x**2 + y**2)
    z = np.sin(r)

    # Join the points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)

    points = vtk.vtkPoints()
    points.SetData(coords)

    grid = vtk.vtkStructuredGrid()
    grid.SetDimensions(*z.shape, 1)
    grid.SetPoints(points)

    # Add point data
    arr = numpy_to_vtk(z.ravel())
    arr.SetName("z")
    grid.GetPointData().SetScalars(arr)

    mesh_state = to_mesh_state(grid)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            mapper={'orientationArray': 'Normals'},
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content

def render_mesh_with_faces():
    '''
    Create and fill the VTK Data Object with your own data using the VTK library and the pyvista high-level api.
    '''
    points = np.array(
        [
            [0.0480, 0.0349, 0.9982],
            [0.0305, 0.0411, 0.9987],
            [0.0207, 0.0329, 0.9992],
            [0.0218, 0.0158, 0.9996],
            [0.0377, 0.0095, 0.9992],
            [0.0485, 0.0163, 0.9987],
            [0.0572, 0.0603, 0.9965],
            [0.0390, 0.0666, 0.9970],
            [0.0289, 0.0576, 0.9979],
            [0.0582, 0.0423, 0.9974],
            [0.0661, 0.0859, 0.9941],
            [0.0476, 0.0922, 0.9946],
            [0.0372, 0.0827, 0.9959],
            [0.0674, 0.0683, 0.9954],
        ],
    )

    # each face is encoded in VTK style as [n_points, i0, i1, ..., i{n-1}]
    face_a = [6, 0, 1, 2, 3, 4, 5]
    face_b = [6, 6, 7, 8, 1, 0, 9]
    face_c = [6, 10, 11, 12, 7, 6, 13]
    faces = np.concatenate((face_a, face_b, face_c))

    mesh = pv.PolyData(points, faces)
    mesh_state = to_mesh_state(mesh)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show origins
        )
    ])

    return content