
WISDEM / WEIS · build 13463516513 (push, via github, web-flow)
19 Feb 2025 08:25PM UTC · coverage: 54.799% (-2.1%) from 56.892%
Merge pull request #355 from WISDEM/develop: Update main

2 of 500 new or added lines in 3 files covered (0.4%)
5 existing lines in 2 files now uncovered
6594 of 12033 relevant lines covered (54.8%)
0.55 hits per line

Source File

/weis/visualization/utils.py

'''
Various helper functions for visualizing WEIS outputs
'''
from openfast_io.FileTools import load_yaml
import weis.inputs as sch
import pandas as pd
import numpy as np
import openmdao.api as om
import glob
import json
import multiprocessing as mp
import plotly.graph_objects as go
import os
import io
import yaml
import re
import socket
from dash import html
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import pickle
import raft
from raft.helpers import *

import vtk
import dash_vtk
from dash_vtk.utils import to_mesh_state
import pyvista as pv

try:
    import ruamel_yaml as ry
except Exception:
    try:
        import ruamel.yaml as ry
    except Exception:
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')

try:
    from vtkmodules.util.numpy_support import numpy_to_vtk, vtk_to_numpy
except ImportError:
    from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy


def checkPort(port, host="0.0.0.0"):
    # check port availability by binding to it, then close the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((host, port))
        result = True
    except OSError:
        result = False

    sock.close()
    return result
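

# Editor's usage sketch (not part of the WEIS source): checkPort can be used to
# scan for a free port before launching the Dash app. The port range here is an
# arbitrary assumption.
def _example_find_free_port(start=8050, stop=8060):
    for port in range(start, stop):
        if checkPort(port):
            return port
    return None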


def parse_yaml(file_path):
    '''
    Parse the yaml file contents into a dictionary
    '''
    # TODO: parsing hyperlink data currently raises an error - either skip it or add support?
    #       load_yaml doesn't work well for this case either.
    try:
        # data = load_yaml(file_path, 1)
        with io.open(file_path, 'r') as stream:
            data = yaml.safe_load(stream)

        data['yamlPath'] = file_path
        return data

    except FileNotFoundError:
        print('Could not locate the input yaml file..')
        exit()

    except Exception as e:
        print(e)
        exit()


def dict_to_html(data, out_html_list, level):
    '''
    Render the nested dictionary data as a list of html components
    '''
    for k1, v1 in data.items():
        if k1 not in ['dirs', 'files']:
            if not isinstance(v1, (list, dict)):
                out_html_list.append(html.H6(f'{"---"*level}{k1}: {v1}'))
                continue

            out_html_list.append(html.H6(f'{"---"*level}{k1}'))

        if isinstance(v1, list):
            out_html_list.append(html.Div([
                                    html.H6(f'{"---"*(level+1)}{i}') for i in v1]))

        elif isinstance(v1, dict):
            out_html_list = dict_to_html(v1, out_html_list, level+1)

    return out_html_list
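

# Editor's usage sketch (not part of the WEIS source): dict_to_html flattens a
# nested dict into indented html.H6 rows. The sample dictionary is made up.
def _example_dict_to_html():
    sample = {'turbine': {'hub_height': 150.0, 'blades': ['b1', 'b2', 'b3']}}
    return dict_to_html(sample, [], level=0)
    # -> [H6('turbine'), H6('---hub_height: 150.0'), H6('---blades'), Div([...])]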


def read_cm(cm_file):
    """
    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : str
        The file path for the case matrix

    Returns
    -------
    cm : pd.DataFrame
        The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    cnames = []
    for c in list(cm_dict.keys()):
        if isinstance(c, ry.comments.CommentedKeySeq):
            cnames.append(tuple(c))
        else:
            cnames.append(c)
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm


def parse_contents(data):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except Exception:
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df


def load_vars_file(fn_vars):
    """
    load a json file of problem variables as output from WEIS (as problem_vars.json)

    parameters:
    -----------
    fn_vars: str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    with open(fn_vars, "r") as fjson:
        # unpack in a useful form
        vars = {k: dict(v) for k, v in json.load(fjson).items()}
    return vars
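

# Editor's usage sketch (not part of the WEIS source; the path is hypothetical):
# vars_dict = load_vars_file("outputs/problem_vars.json")
# vars_dict["objectives"] and vars_dict["constraints"] are the entries used by
# get_feasible_iterations() below.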


def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    compare openmdao data dictionaries to find the in-common (and not) keys

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between second and first OM data dicts
    """

    diff_keys_12 = set(dataOM_1).difference(dataOM_2)
    diff_keys_21 = set(dataOM_2).difference(dataOM_1)
    keys_all = set(dataOM_1).intersection(dataOM_2)

    if verbose and diff_keys_12:
        print(f"the following keys are only in {fn_1}:")
        for key_m in diff_keys_12:
            print(f"\t{key_m}")
    if verbose and diff_keys_21:
        print(f"the following keys are only in {fn_2}:")
        for key_m in diff_keys_21:
            print(f"\t{key_m}")
    if verbose:
        print(f"the following keys are in both {fn_1} and {fn_2}:")
        for key_m in keys_all:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21
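

# Editor's usage sketch (not part of the WEIS source): comparing two toy
# openmdao data dictionaries.
def _example_compare_om_data():
    d1 = {'a': [1.0], 'b': [2.0]}
    d2 = {'b': [2.0], 'c': [3.0]}
    common, only_1, only_2 = compare_om_data(d1, d2, fn_1='run A', fn_2='run B')
    return common, only_1, only_2   # ({'b'}, {'a'}, {'c'})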


def load_OMsql(log):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    cr = om.CaseReader(log)
    rec_data = {}
    cases = cr.get_cases('driver')
    for case in cases:
        for key in case.outputs.keys():
            if key not in rec_data:
                rec_data[key] = []
            rec_data[key].append(case[key])

    return rec_data


def load_OMsql_temp(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
        log : str
            filename of the log sql database that should be loaded
        parse_multi : bool
            switch to turn on rank/iteration parsing and storage
        meta : str
            filename of the meta log sql database that should be loaded
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        rec_data: dict
            dictionary of the data recorded by openMDAO
    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            rankNo = case.name.split(":")[0]
            assert rankNo.startswith("rank")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():

            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []

            if hasattr(case[key], '__len__'):
                if len(case[key]) == 1:
                    # single-element data: coerce to float and add it to the list
                    rec_data[key].append(float(case[key]))
                else:
                    # otherwise store it as a numpy array
                    rec_data[key].append(np.array(case[key]))
            else:
                rec_data[key].append(case[key])

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data  # return the output
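

# Editor's note (not part of the WEIS source): the rank/iteration parsing above
# assumes OpenMDAO driver case names of the form "rank<N>:<Driver>|<iter>"; a
# made-up example of the same string handling:
def _example_parse_case_name(name="rank0:ScipyOptimize_SLSQP|12"):
    rank = int(name.split(":")[0][4:])    # -> 0
    iteration = int(name.split("|")[-1])  # -> 12
    return rank, iteration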


def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi=True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string
    opt_logs = sorted(
        glob.glob(log_fmt),
        key=lambda v: int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        raise FileExistsError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # handle meta logfile discovery... not sure what it actually does
    if meta_in is not None:
        meta = meta_in  # if a meta is given, override
    elif meta_found is not None:
        meta = meta_found  # if a meta is not given but one is found, use that
    else:
        meta = None  # otherwise, run without a meta

    # extract the ranks from the sql files
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # run multiprocessing (note: load_OMsql_temp, not the single-argument
    # load_OMsql, accepts the parse_multi/meta/verbose arguments used here)
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load sql file
        outdata = pool.starmap(load_OMsql_temp, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else:  # no multiprocessing
        outdata = [load_OMsql_temp(log, parse_multi=True, verbose=verbose, meta=meta) for log in opt_logs]

    # create a dictionary and turn it into a dataframe for convenience
    collected_data = {}
    ndarray_keys = []
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank":  # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")
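

# Editor's usage sketch (not part of the WEIS source; the file layout is
# hypothetical): log_fmt is a glob pattern matching per-rank logs plus an
# optional "_meta" file, e.g. log_opt.sql_0, log_opt.sql_1, log_opt.sql_meta:
# data = load_OMsql_multi("outputs/log_opt.sql_*")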


def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict : dict
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis

    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """

    dfOMmulti = pd.DataFrame(dataOMmulti)
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp: grp.loc[grp["floatingse.system_structural_mass"].idxmin()],
        include_groups=False,
    ).to_dict()

    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE


def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict: dict
            experiment design variables for checking
        feas_tol: float (optional)
            tolerance for feasibility analysis

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise feasibility indications for that constraint
    """

    # assert len(vars_dict["objectives"].values()) == 1, "can't handle multi-objective... yet. -cfrontin"
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    feasibility_constraintwise = dict()
    total_feasibility = np.ones_like(np.array(dataOM[objective_name]).reshape(-1, 1), dtype=bool)
    for k, v in vars_dict["constraints"].items():
        feasibility = np.ones_like(dataOM[objective_name], dtype=bool).reshape(-1, 1)
        values = np.array(dataOM[v["name"]])
        if len(values.shape) == 1:
            values = values.reshape(-1, 1)
        if v.get("upper") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.less_equal(values, (1+feas_tol)*v["upper"]), axis=1).reshape(-1, 1))
        if v.get("lower") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.greater_equal(values, (1-feas_tol)*v["lower"]), axis=1).reshape(-1, 1))
        feasibility_constraintwise[v["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)
    return total_feasibility, feasibility_constraintwise
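

# Editor's worked example (not part of the WEIS source): one toy objective and
# one upper-bounded constraint; iteration 2 violates the bound.
def _example_feasibility():
    dataOM = {'obj': [1.0, 2.0, 3.0], 'con': [0.5, 1.5, 0.9]}
    vars_dict = {
        'objectives': {'o': {'name': 'obj'}},
        'constraints': {'c': {'name': 'con', 'upper': 1.0}},
    }
    tfeas, cfeas = get_feasible_iterations(dataOM, vars_dict)
    return tfeas.ravel()   # array([ True, False,  True])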


def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies the format of the DVs, constraints, objective variable file;
    guarantees a list of experiments has the same variables;
    adjusts unbounded constraints;
    returns the verified, unified set of vars
    """

    for vars_2 in vars_i:
        if vars_2 is not None:
            for k0 in set(vars_1.keys()).union(vars_2):
                assert k0 in vars_1
                assert k0 in vars_2
                for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                    assert k1 in vars_1[k0]
                    assert k1 in vars_2[k0]
                    for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                        assert k2 in vars_1[k0][k1]
                        assert k2 in vars_2[k0][k1]
                        if k2 == "val":
                            continue
                        if isinstance(vars_1[k0][k1][k2], str):
                            assert vars_1[k0][k1][k2] == vars_2[k0][k1][k2]
                        elif vars_1[k0][k1][k2] is not None:
                            assert np.all(np.isclose(vars_1[k0][k1][k2], vars_2[k0][k1][k2]))
                        else:
                            assert (vars_1[k0][k1][k2] is None) and (vars_2[k0][k1][k2] is None)

    vars_unified = vars_1.copy()
    for k0 in vars_unified.keys():
        for k1 in vars_unified[k0].keys():
            if (vars_unified[k0][k1].get("lower") is not None) and (vars_unified[k0][k1].get("lower") < -1e28):
                vars_unified[k0][k1]["lower"] = -np.inf
            if (vars_unified[k0][k1].get("upper") is not None) and (vars_unified[k0][k1].get("upper") > 1e28):
                vars_unified[k0][k1]["upper"] = np.inf

    return vars_unified


def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or unknown
    (??)
    """

    # print them nicely
    print()
    for key in keys_all:
        if key in keys_obj:
            print(f"** {key}")
        elif key in keys_DV:
            print(f"-- {key}")
        elif key in keys_constr:
            print(f"<> {key}")
        else:
            print(f"?? {key}")
    print()


def read_per_iteration(iteration, stats_paths):

    stats_path_matched = [x for x in stats_paths if f'iteration_{iteration}' in x][0]
    iteration_path = '/'.join(stats_path_matched.split('/')[:-1])
    stats = pd.read_pickle(stats_path_matched)
    # dels = pd.read_pickle(iteration_path+'/DELs.p')
    # fst_vt = pd.read_pickle(iteration_path+'/fst_vt.p')
    print('iteration path with ', iteration, ': ', stats_path_matched)

    return stats, iteration_path


def get_timeseries_data(run_num, stats, iteration_path):

    stats = stats.reset_index()     # make an 'index' column with elements like 'IEA_22_Semi_00', ...
    filename = stats.loc[run_num, 'index'].to_string()      # filenames differ - stats: IEA_22_Semi_83 / timeseries/: IEA_22_Semi_0_83.p
    if filename.split('_')[-1].startswith('0'):
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1][1:]+'.p').strip()
    else:
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1]+'.p').strip()

    # e.g., visualization_demo/openfast_runs/rank_0/iteration_0/timeseries/IEA_22_Semi_0_0.p
    timeseries_path = '/'.join([iteration_path, 'timeseries', filename])
    timeseries_data = pd.read_pickle(timeseries_path)

    return filename, timeseries_data
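

# Editor's note (not part of the WEIS source): the run-number munging above maps
# stats indices to timeseries pickle names, e.g.
#   'IEA_22_Semi_05' -> 'IEA_22_Semi_0_5.p'    (leading zero stripped)
#   'IEA_22_Semi_83' -> 'IEA_22_Semi_0_83.p'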


def empty_figure():
    '''
    Draw an empty figure to show before anything is initialized
    '''
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(template=None)
    fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)
    fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)

    return fig


def toggle(click, is_open):
    if click:
        return not is_open
    return is_open


def store_dataframes(var_files):
    dfs = {}
    for _, file_path in var_files.items():
        df = pd.read_csv(file_path, skiprows=[0,1,2,3,4,5,7], sep=r'\s+')
        dfs[file_path] = df.to_dict('records')

    return dfs
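

# Editor's note (an assumption, not stated in the source): the skiprows/sep
# pattern above matches whitespace-delimited OpenFAST-style output files, where
# rows 0-5 are header text, row 6 holds channel names, and row 7 holds units.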


def get_file_info(file_path):
    file_name = file_path.split('/')[-1]
    file_abs_path = os.path.abspath(file_path)
    file_size = round(os.path.getsize(file_path) / (1024**2), 2)
    creation_time = os.path.getctime(file_path)
    modification_time = os.path.getmtime(file_path)

    file_info = {
        'file_name': file_name,
        'file_abs_path': file_abs_path,
        'file_size': file_size,
        'creation_time': creation_time,
        'modification_time': modification_time
    }

    return file_info


def find_file_path_from_tree(nested_dict, filename, prepath=()):
    # Works for multi-keyed files
    # Sample outputs: ('outputDirStructure', 'sample_test') ('outputDirStructure', 'sample_multi')
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == filename:
            yield path + (v, )
        elif isinstance(v, list) and filename in v:
            yield path + (filename, )
        elif hasattr(v, 'items'):
            yield from find_file_path_from_tree(v, filename, path)
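

# Editor's usage sketch (not part of the WEIS source): the tree below is made up.
def _example_find_file_path():
    tree = {'outputs': {'dirs': [], 'files': ['log.yaml']}, 'name': 'case'}
    return list(find_file_path_from_tree(tree, 'log.yaml'))
    # -> [('outputs', 'files', 'log.yaml')]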


def find_iterations(nested_dict, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if 'iteration' in k:
            yield int(re.findall(r'\d+', k)[0])
        elif hasattr(v, 'items'):
            yield from find_iterations(v, path)


def update_yaml(input_dict, yaml_filepath):
    with open(yaml_filepath, 'w') as outfile:
        yaml.dump(input_dict, outfile, default_flow_style=False)


def read_cost_variables(labels, refturb_variables):
    # Read tcc cost-related variables from CSV file
    cost_matrix = [['Main Turbine Components', 'Cost']]

    for l in labels:
        cost_matrix.append([l, eval(refturb_variables[f'tcc.{l}_cost']['values'])[0]])

    return cost_matrix


def generate_raft_img(raft_design_dir, plot_dir, log_data):
    '''
    Temporary function to visualize the raft 3d plot using matplotlib.
    TODO: build an interactive 3d plot using plotly
    '''
    n_plots = len(os.listdir(raft_design_dir))
    print('n_plots: ', n_plots)
    os.makedirs(plot_dir, exist_ok=True)

    opt_outs = {}
    opt_outs['max_pitch'] = np.squeeze(np.array(log_data['raft.Max_PtfmPitch']))

    for i_plot in range(n_plots):
        # Set up subplots
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        ax = plt.axes(projection='3d')

        with open(os.path.join(raft_design_dir, f'raft_design_{i_plot}.pkl'), 'rb') as f:
            design = pickle.load(f)

        # TODO: found a typo on the gamma value in the 1_raft_opt example
        if design['turbine']['tower']['gamma'] == np.array([0.]):
            design['turbine']['tower']['gamma'] = 0.0       # Change it from array([0.])

        # set up the model
        model1 = raft.Model(design)
        model1.analyzeUnloaded(
            ballast=False,
            heave_tol=1.0
            )

        model1.fowtList[0].r6[4] = np.radians(opt_outs['max_pitch'][i_plot])

        _, ax = model1.plot(ax=ax)

        ax.azim = -88.63636363636361
        ax.elev = 27.662337662337674
        ax.set_xlim3d((-110.90447789470043, 102.92063063344857))
        ax.set_ylim3d((64.47420067304586, 311.37818252335893))
        ax.set_zlim3d((-88.43591080818854, -57.499893019459606))

        image_filename = os.path.join(plot_dir, f'ptfm_{i_plot}.png')
        plt.savefig(image_filename, bbox_inches='tight')
        print('saved ', image_filename)

NEW
725
def remove_duplicated_legends(fig):
×
NEW
726
    names = set()
×
NEW
727
    fig.for_each_trace(
×
728
        lambda trace:
729
            trace.update(showlegend=False)
730
            if (trace.name in names) else names.add(trace.name))
731
    
NEW
732
    return fig
×
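

# Editor's usage sketch (not part of the WEIS source): two traces share a name,
# so the second trace's legend entry is hidden.
def _example_dedupe_legend():
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='DLC 1.1'))
    fig.add_trace(go.Scatter(x=[0, 1], y=[1, 0], name='DLC 1.1'))
    return remove_duplicated_legends(fig)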


############################
# Viz Utils for WindIO
############################
def render_meshes():
    meshes = []

    cylinder = {'center': [1,2,3], 'direction': [1,1,1], 'radius': 1, 'height': 2}
    sphere = {'center': [0,0,0], 'direction': [0,0,1], 'radius': 0.5}
    plane = {'center': [0,0,0], 'direction': [0,0,1]}
    line = {'pointa': [-0.5,0,0], 'pointb': [0.5,0,0]}
    box = {'bounds': [-1.0,1.0,-1.0,1.0,-1.0,1.0]}

    # Define structured points with numpy
    x = np.arange(-10, 10, 0.25)    # (80,)
    y = np.arange(-10, 10, 0.25)    # (80,)
    x, y = np.meshgrid(x, y)        # both (80, 80)
    r = np.sqrt(x**2 + y**2)
    z = np.sin(r)
    points = (x, y, z)

    mesh_cylinder = render_cylinder(cylinder)
    mesh_sphere = render_sphere(sphere)
    mesh_plane = render_plane(plane)
    mesh_line = render_line(line)
    mesh_box = render_box(box)
    mesh_random = render_our_own(points)

    meshes.append(mesh_cylinder)
    meshes.append(mesh_sphere)
    meshes.append(mesh_plane)
    meshes.append(mesh_line)
    meshes.append(mesh_box)
    meshes.append(mesh_random)

    return meshes


def render_cylinder(cylinder):
    cylinder = pv.Cylinder(
        center=cylinder['center'], direction=cylinder['direction'], radius=cylinder['radius'], height=cylinder['height']
    )
    mesh_state = to_mesh_state(cylinder)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_sphere(sphere):
    sphere = pv.Sphere(
        center=sphere['center'], direction=sphere['direction'], radius=sphere['radius']
    )
    mesh_state = to_mesh_state(sphere)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_plane(plane):
    plane = pv.Plane(
        center=plane['center'], direction=plane['direction']
    )
    mesh_state = to_mesh_state(plane)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_line(line):
    line = pv.Line(
        pointa=line['pointa'], pointb=line['pointb']
    )
    mesh_state = to_mesh_state(line)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_box(box):
    box = pv.Box(
        bounds=box['bounds']
    )
    mesh_state = to_mesh_state(box)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_our_own(points):
    '''
    Create and fill the VTK data object with your own data, using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    https://docs.pyvista.org/examples/00-load/create-tri-surface
    https://docs.pyvista.org/api/core/_autosummary/pyvista.polydatafilters.reconstruct_surface#pyvista.PolyDataFilters.reconstruct_surface
    '''

    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)
    cloud = pv.PolyData(coords)
    # mesh = cloud.delaunay_2d()          # From a point cloud, a 2D Delaunay filter generates a surface from a set of points on a plane.
    mesh = cloud.delaunay_3d()

    # Works for the sin-plane but not for the cylinder..
    '''
    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)

    points = vtk.vtkPoints()
    points.SetData(coords)

    grid = vtk.vtkStructuredGrid()
    grid.SetDimensions(*z.shape, 1)     # *z.shape: (80, 80) for the sin-plane / 1000 for the cylinder
    grid.SetPoints(points)

    # Add point data
    arr = numpy_to_vtk(z.ravel())
    arr.SetName("z")
    grid.GetPointData().SetScalars(arr)
    '''

    mesh_state = to_mesh_state(mesh)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            mapper={'orientationArray': 'Normals'},
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_our_own_delaunay(points):
    '''
    Create and fill the VTK data object with your own data, using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    https://docs.pyvista.org/examples/00-load/create-tri-surface
    https://docs.pyvista.org/api/core/_autosummary/pyvista.polydatafilters.reconstruct_surface#pyvista.PolyDataFilters.reconstruct_surface
    '''

    # Join the points
    x, y, z = points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)
    cloud = pv.PolyData(coords)
    # mesh = cloud.delaunay_2d()          # From a point cloud, a 2D Delaunay filter generates a surface from a set of points on a plane.
    mesh = cloud.delaunay_3d()

    mesh_state = to_mesh_state(mesh)

    return mesh_state


def find_rows(df_dict, df_type='geometry'):

    df = pd.DataFrame(df_dict)

    return df[df['Type'] == df_type].to_dict('list')         # {'File Path': ['a.yaml', 'c.yaml'], 'Label': ['abc', 'ghi'], 'Type': [1, 1]}

    # return {k: v[idx] for idx in range(len(df['Type'])) for k, v in df.items() if df['Type'][idx]==df_type}


def load_geometry_data(geometry_paths):
    # Read the geometry input files and load the airfoil data
    # wt_options = sch.load_geometry_yaml('/projects/weis/sryu/visualization_cases/1_raft_opt/IEA-22-280-RWT.yaml')       # For HPC
    # wt_options = sch.load_geometry_yaml('/Users/sryu/Desktop/FY24/WEIS-Visualization/data/visualization_cases/1_raft_opt/IEA-22-280-RWT.yaml')       # For Local
    airfoils, geom_comps, wt_options_by_file = {}, {}, {}
    # for row in geometry_paths:
    #     wt_options = sch.load_geometry_yaml(row['File Path'])
    #     airfoils[row['Label']] = wt_options['airfoils']
    #     geom_comps[row['Label']] = wt_options['components']

    for filepath, filelabel, _ in zip(*geometry_paths.values()):
        wt_options = sch.load_geometry_yaml(filepath)
        airfoils[filelabel] = wt_options['airfoils']
        geom_comps[filelabel] = wt_options['components']
        wt_options_by_file[filelabel] = wt_options

    return airfoils, geom_comps, wt_options_by_file


###################################################
# Not needed below.. Will be deleted later
###################################################
def load_mesh(file_path):
    '''
    Read either an STL or VTK file and load the 3D mesh
    '''
    # Read the STL file generated by the WindIO tool
    # Sample files to test:
    # file_path = '/Users/sryu/Desktop/FY24/windio2cad/nrel5mw-semi_oc4.stl'
    # file_path = '/Users/sryu/Desktop/FY24/ACDC/project1/case01/vtk/01_NREL_5MW-ED.Mode1.LinTime1.ED_BladeLn2Mesh_motion1.010.vtp'
    if file_path.endswith('.stl'):
        reader = vtk.vtkSTLReader()
    elif file_path.endswith('.vtp'):
        reader = vtk.vtkXMLPolyDataReader()
    else:
        raise ValueError(f'Unsupported mesh file type: {file_path}')

    reader.SetFileName(file_path)
    reader.Update()

    # Get the dataset and build a mesh structure to pass as a Mesh
    dataset = reader.GetOutput()
    mesh_state = to_mesh_state(dataset)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation([
            dash_vtk.Mesh(state=mesh_state)
        ])
    ])

    return content
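

# Editor's usage sketch (not part of the WEIS source; the file path is
# hypothetical):
# content = load_mesh('outputs/platform.stl')   # returns a dash_vtk.View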


def render_terrain():
    '''
    This is an example of VTK rendering of a point cloud. Only for reference.. Will be deleted later.
    '''
    # Get point cloud data from PyVista
    uniformGrid = pv.examples.download_crater_topo()
    subset = uniformGrid.extract_subset((500, 900, 400, 800, 0, 0), (5, 5, 1))

    # Update warp
    terrain = subset.warp_by_scalar(factor=1)
    polydata = terrain.extract_geometry()
    points = polydata.points.ravel()                        # points: [1.81750e+06 5.64600e+06 1.55213e+03 ... 1.82350e+06 5.65200e+06 1.91346e+03] / shape is (19683,) with dtype=float32
    polys = vtk_to_numpy(polydata.GetPolys().GetData())     # polys: [   4    0    1 ... 6479 6560 6559] / shape is (32000,) with dtype=int64
    elevation = polydata["scalar1of1"]
    color_range = [np.amin(elevation), np.amax(elevation)]

    content = dash_vtk.View(
        pickingModes=["hover"],
        children=[
            dash_vtk.GeometryRepresentation(
                id="vtk-representation",
                children=[
                    dash_vtk.PolyData(
                        id="vtk-polydata",
                        points=points,
                        polys=polys,
                        children=[
                            dash_vtk.PointData(
                                [
                                    dash_vtk.DataArray(
                                        id="vtk-array",
                                        registration="setScalars",
                                        name="elevation",
                                        values=elevation,
                                    )
                                ]
                            )
                        ],
                    )
                ],
                colorMapPreset="erdc_blue2green_muted",
                colorDataRange=color_range,
                property={"edgeVisibility": True},
                showCubeAxes=True,
                cubeAxesStyle={"axisLabels": ["", "", "Altitude"]},
            ),
            dash_vtk.GeometryRepresentation(
                id="pick-rep",
                actor={"visibility": False},
                children=[
                    dash_vtk.Algorithm(
                        id="pick-sphere",
                        vtkClass="vtkSphereSource",
                        state={"radius": 100},
                    )
                ],
            ),
        ],
    )

    return content


def render_volume():
    '''
    This is an example of a random volume generation. Only for reference.. Will be deleted later.
    '''
    import random

    content = dash_vtk.View(
        children=dash_vtk.VolumeDataRepresentation(
            spacing=[1, 1, 1],
            dimensions=[10, 10, 10],
            origin=[0, 0, 0],
            scalars=[random.random() for _ in range(1000)],
            rescaleColorMap=False,
        )
    )

    return content


def render_mesh_with_grid():
    '''
    Create and fill the VTK data object with your own data, using the VTK library and the pyvista high-level api

    Reference: https://tutorial.pyvista.org/tutorial/06_vtk/b_create_vtk.html
    '''
    # Define structured points with numpy
    x = np.arange(-10, 10, 0.25)    # (80,)
    y = np.arange(-10, 10, 0.25)    # (80,)
    x, y = np.meshgrid(x, y)        # both (80, 80)
    r = np.sqrt(x**2 + y**2)
    z = np.sin(r)

    # Join the points
    values = np.c_[x.ravel(), y.ravel(), z.ravel()]     # (6400, 3) where each column is x, y, z coords
    coords = numpy_to_vtk(values)

    points = vtk.vtkPoints()
    points.SetData(coords)

    grid = vtk.vtkStructuredGrid()
    grid.SetDimensions(*z.shape, 1)
    grid.SetPoints(points)

    # Add point data
    arr = numpy_to_vtk(z.ravel())
    arr.SetName("z")
    grid.GetPointData().SetScalars(arr)

    mesh_state = to_mesh_state(grid)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            mapper={'orientationArray': 'Normals'},
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content


def render_mesh_with_faces():
    '''
    Create and fill the VTK data object with your own data, using the VTK library and the pyvista high-level api.
    '''
    points = np.array(
        [
            [0.0480, 0.0349, 0.9982],
            [0.0305, 0.0411, 0.9987],
            [0.0207, 0.0329, 0.9992],
            [0.0218, 0.0158, 0.9996],
            [0.0377, 0.0095, 0.9992],
            [0.0485, 0.0163, 0.9987],
            [0.0572, 0.0603, 0.9965],
            [0.0390, 0.0666, 0.9970],
            [0.0289, 0.0576, 0.9979],
            [0.0582, 0.0423, 0.9974],
            [0.0661, 0.0859, 0.9941],
            [0.0476, 0.0922, 0.9946],
            [0.0372, 0.0827, 0.9959],
            [0.0674, 0.0683, 0.9954],
        ],
    )

    face_a = [6, 0, 1, 2, 3, 4, 5]
    face_b = [6, 6, 7, 8, 1, 0, 9]
    face_c = [6, 10, 11, 12, 7, 6, 13]
    faces = np.concatenate((face_a, face_b, face_c))

    mesh = pv.PolyData(points, faces)
    mesh_state = to_mesh_state(mesh)

    content = dash_vtk.View([
        dash_vtk.GeometryRepresentation(
            children=[dash_vtk.Mesh(state=mesh_state)],
            showCubeAxes=True,      # Show cube axes
        )
    ])

    return content