WISDEM / WEIS, build 10204541058

01 Aug 2024 07:27PM UTC. Coverage: 79.113% (+0.03%) from 79.079%.

Pull Request #305 (github / web-flow): Optimization cleanup to fix and conform to viz changes.
Merge f47454a78 into 1767dafbb

0 of 5 new or added lines in 2 files covered (0.0%).
325 existing lines in 5 files now uncovered.
21601 of 27304 relevant lines covered (79.11%).
0.79 hits per line.

Source file: /weis/visualization/utils.py (file coverage: 0.0%)
'''
Various helper functions for visualizing WEIS outputs
'''
from weis.aeroelasticse.FileTools import load_yaml
import pandas as pd
import numpy as np
import openmdao.api as om
import glob
import json
import multiprocessing as mp
import plotly.graph_objects as go
import os
import io
import yaml
import re
import socket
from dash import html
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import pickle
import raft
from raft.helpers import *

try:
    import ruamel_yaml as ry
except Exception:
    try:
        import ruamel.yaml as ry
    except Exception:
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')


def checkPort(port, host="0.0.0.0"):
    # check port availability by binding to it, then close the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = False
    try:
        sock.bind((host, port))
        result = True
    except OSError:
        result = False

    sock.close()
    return result

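For orientation, a minimal usage sketch (the port number and the fallback loop are illustrative, not part of WEIS):

    # find a free port for the visualization server, starting from 8050
    port = 8050
    while not checkPort(port):
        port += 1
    print(f"starting dashboard on port {port}")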

def parse_yaml(file_path):
    '''
    Parse the yaml file contents into a dictionary
    '''
    try:
        with io.open(file_path, 'r') as stream:
            data = yaml.safe_load(stream)

        # record where the dictionary came from
        data['yamlPath'] = file_path
        return data

    except FileNotFoundError:
        print('Could not locate the input yaml file..')
        exit()

    except Exception as e:
        print(e)
        exit()

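A usage sketch (the filename is hypothetical):

    config = parse_yaml('weis_visualization.yaml')
    print(config['yamlPath'])  # the source path is recorded in the returned dict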

def dict_to_html(data, out_html_list, level):
    '''
    Render a nested dictionary as a list of html components
    '''

    for k1, v1 in data.items():
        if k1 not in ['dirs', 'files']:
            if not isinstance(v1, list) and not isinstance(v1, dict):
                out_html_list.append(html.H6(f'{"---"*level}{k1}: {v1}'))
                continue

            out_html_list.append(html.H6(f'{"---"*level}{k1}'))

        if isinstance(v1, list):
            out_html_list.append(html.Div([
                                    html.H6(f'{"---"*(level+1)}{i}') for i in v1]))

        elif isinstance(v1, dict):
            out_html_list = dict_to_html(v1, out_html_list, level+1)

    return out_html_list

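A sketch of how this renders (the dictionary is illustrative):

    children = dict_to_html({'run': {'status': 'done', 'files': ['a.yaml']}}, [], 0)
    # children is a flat list of dash html.H6/html.Div components,
    # indented with '---' per nesting level, ready to place in a layout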

def read_cm(cm_file):
    """
    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : str
        The file path for the case matrix

    Returns
    -------
    cm : pd.DataFrame
        The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    cnames = []
    for c in list(cm_dict.keys()):
        if isinstance(c, ry.comments.CommentedKeySeq):
            cnames.append(tuple(c))
        else:
            cnames.append(c)
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm


def parse_contents(data):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py

    Flatten a dictionary of recorded lists into a pandas DataFrame,
    unwrapping single-element entries along the way.
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except Exception:
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df


def load_vars_file(fn_vars):
    """
    load a json file of problem variables as output from WEIS (as problem_vars.json)

    parameters:
    -----------
    fn_vars : str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    with open(fn_vars, "r") as fjson:
        # unpack in a useful form
        vars = {k: dict(v) for k, v in json.load(fjson).items()}
    return vars

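A usage sketch (the paths are hypothetical; verify_vars, defined below, can then reconcile the variables across runs):

    vars_run1 = load_vars_file('outputs/run1/problem_vars.json')
    vars_run2 = load_vars_file('outputs/run2/problem_vars.json')
    vars_all = verify_vars(vars_run1, vars_run2)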

def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    compare openmdao data dictionaries to find the in-common (and not) keys

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between second and first OM data dicts
    """

    diff_keys_12 = set(dataOM_1).difference(dataOM_2)
    diff_keys_21 = set(dataOM_2).difference(dataOM_1)
    keys_all = set(dataOM_1).intersection(dataOM_2)

    if verbose:
        if diff_keys_12:
            print(f"the following keys are only in {fn_1}:")
        for key_m in diff_keys_12:
            print(f"\t{key_m}")
        if diff_keys_21:
            print(f"the following keys are only in {fn_2}:")
        for key_m in diff_keys_21:
            print(f"\t{key_m}")
        print(f"the following keys are in both {fn_1} and {fn_2}:")
        for key_m in keys_all:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21


def load_OMsql(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
    log : str
        filename of the log sql database that should be loaded
    parse_multi : bool
        switch to turn on rank/iteration parsing and storage
    meta : str
        filename of the meta log sql database that should be loaded
    verbose : bool (optional, default: False)
        if we want to print what's happening

    returns:
    --------
    rec_data : dict
        dictionary of the data recorded by openMDAO
    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            rankNo = case.name.split(":")[0]
            assert rankNo.startswith("rank")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():
            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []

            if hasattr(case[key], '__len__') and len(case[key]) != 1:
                # convert multi-element data to a numpy array before storing
                rec_data[key].append(np.array(case[key]))
            else:
                rec_data[key].append(case[key])

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data

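A usage sketch (the log name mirrors WEIS defaults but is an assumption here):

    rec_data = load_OMsql('log_opt.sql')
    for key, vals in rec_data.items():
        print(key, len(vals))  # one entry per recorded driver iteration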

def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi=True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string; sort by rank,
    # pushing any "meta" log to the end
    opt_logs = sorted(
        glob.glob(log_fmt),
        key=lambda v: int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        raise FileExistsError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # settle on a meta logfile: an explicit argument wins, then a discovered
    # file, then none
    if meta_in is not None:
        meta = meta_in
    elif meta_found is not None:
        meta = meta_found
    else:
        meta = None

    # extract the ranks from the sql files
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # run multiprocessing
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load sql file
        outdata = pool.starmap(load_OMsql, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else:  # no multiprocessing
        outdata = [load_OMsql(log, parse_multi=True, verbose=verbose, meta=meta) for log in opt_logs]

    # create a dictionary and turn it into a dataframe for convenience
    collected_data = {}
    ndarray_keys = []
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank":  # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")

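A usage sketch (the glob pattern follows the rank-suffixed naming the sorter expects, e.g. log_opt.sql_0, log_opt.sql_1, ..., but the exact names are an assumption):

    data_multi = load_OMsql_multi('outputs/log_opt.sql_*')
    print(len(data_multi['rank']))  # total recorded cases across all ranks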

def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict : dict
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis

    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """

    dfOMmulti = pd.DataFrame(dataOMmulti)
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    # within each iteration, keep the feasible design with the lowest
    # structural mass
    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp: grp.loc[grp["floatingse.system_structural_mass"].idxmin()],
        include_groups=False,
    ).to_dict()

    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE


def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict: dict
            experiment design variables for checking
        feas_tol: float (optional)
            tolerance for feasibility analysis

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise
            feasibility indications for that constraint
    """

    # assumes a single objective
    # assert len(vars_dict["objectives"].values()) == 1, "can't handle multi-objective... yet. -cfrontin"
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    feasibility_constraintwise = dict()
    total_feasibility = np.ones_like(np.array(dataOM[objective_name]).reshape(-1, 1), dtype=bool)
    for k, v in vars_dict["constraints"].items():
        feasibility = np.ones_like(dataOM[objective_name], dtype=bool).reshape(-1, 1)
        values = np.array(dataOM[v["name"]])
        if len(values.shape) == 1:
            values = values.reshape(-1, 1)
        # a constraint is satisfied (within tolerance) if every entry is
        # below its upper bound and above its lower bound
        if v.get("upper") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.less_equal(values, (1+feas_tol)*v["upper"]), axis=1).reshape(-1, 1))
        if v.get("lower") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.greater_equal(values, (1-feas_tol)*v["lower"]), axis=1).reshape(-1, 1))
        feasibility_constraintwise[v["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)
    return total_feasibility, feasibility_constraintwise


def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies format of DVs, constraints, objective variable file
    guarantees a list of experiments has the same variables
    adjusts unbounded constraints
    returns verified list of vars
    """

    # check that every experiment has exactly the same variable structure
    for vars_2 in vars_i:
        if vars_2 is not None:
            for k0 in set(vars_1.keys()).union(vars_2):
                assert k0 in vars_1
                assert k0 in vars_2
                for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                    assert k1 in vars_1[k0]
                    assert k1 in vars_2[k0]
                    for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                        assert k2 in vars_1[k0][k1]
                        assert k2 in vars_2[k0][k1]
                        if k2 == "val":
                            continue
                        if isinstance(vars_1[k0][k1][k2], str):
                            assert vars_1[k0][k1][k2] == vars_2[k0][k1][k2]
                        elif vars_1[k0][k1][k2] is not None:
                            assert np.all(np.isclose(vars_1[k0][k1][k2], vars_2[k0][k1][k2]))
                        else:
                            assert (vars_1[k0][k1][k2] is None) and (vars_2[k0][k1][k2] is None)

    # map effectively-unbounded constraints to +/- infinity
    vars_unified = vars_1.copy()
    for k0 in vars_unified.keys():
        for k1 in vars_unified[k0].keys():
            if (vars_unified[k0][k1].get("lower") is not None) and (vars_unified[k0][k1].get("lower") < -1e28):
                vars_unified[k0][k1]["lower"] = -np.inf
            if (vars_unified[k0][k1].get("upper") is not None) and (vars_unified[k0][k1].get("upper") > 1e28):
                vars_unified[k0][k1]["upper"] = np.inf

    return vars_unified


def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or
    unknown (??)
    """

    # print them nicely
    print()
    for key in keys_all:
        if key in keys_obj:
            print(f"** {key}")
        elif key in keys_DV:
            print(f"-- {key}")
        elif key in keys_constr:
            print(f"<> {key}")
        else:
            print(f"?? {key}")
    print()

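A usage sketch combining earlier helpers (the 'design_vars' key name inside problem_vars.json is an assumption here):

    keys_all, _, _ = compare_om_data(dataOM_a, dataOM_b)
    vars_dict = load_vars_file('outputs/problem_vars.json')
    keys_obj = [v["name"] for v in vars_dict["objectives"].values()]
    keys_DV = [v["name"] for v in vars_dict["design_vars"].values()]    # assumed key
    keys_constr = [v["name"] for v in vars_dict["constraints"].values()]
    prettyprint_variables(keys_all, keys_obj, keys_DV, keys_constr)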

def read_per_iteration(iteration, stats_paths):
    '''
    Locate and load the summary statistics for a single optimization iteration
    '''
    stats_path_matched = [x for x in stats_paths if f'iteration_{iteration}' in x][0]
    iteration_path = '/'.join(stats_path_matched.split('/')[:-1])
    stats = pd.read_pickle(stats_path_matched)
    # dels = pd.read_pickle(iteration_path+'/DELs.p')
    # fst_vt = pd.read_pickle(iteration_path+'/fst_vt.p')
    print('iteration path with ', iteration, ': ', stats_path_matched)

    return stats, iteration_path


def get_timeseries_data(run_num, stats, iteration_path):
    '''
    Resolve the timeseries pickle file that corresponds to a run and load it
    '''
    stats = stats.reset_index()     # make 'index' column that has elements of 'IEA_22_Semi_00, ...'
    filename = stats.loc[run_num, 'index'].to_string()      # filenames don't match - stats: IEA_22_Semi_83 / timeseries/: IEA_22_Semi_0_83.p
    if filename.split('_')[-1].startswith('0'):
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1][1:]+'.p').strip()
    else:
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1]+'.p').strip()

    # e.g., visualization_demo/openfast_runs/rank_0/iteration_0/timeseries/IEA_22_Semi_0_0.p
    timeseries_path = '/'.join([iteration_path, 'timeseries', filename])
    timeseries_data = pd.read_pickle(timeseries_path)

    return filename, timeseries_data


def empty_figure():
    '''
    Draw an empty figure to show on initialization, before any data is loaded
    '''
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(template=None)
    fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)
    fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)

    return fig


def toggle(click, is_open):
    '''
    Flip an open/closed state on click; used by Dash collapse callbacks
    '''
    if click:
        return not is_open
    return is_open

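A sketch of the intended wiring as a Dash callback (the component ids and the app object are hypothetical; assumes the usual dash Input/Output/State imports):

    from dash import Input, Output, State

    @app.callback(
        Output('collapse-body', 'is_open'),
        Input('collapse-toggle', 'n_clicks'),
        State('collapse-body', 'is_open'),
    )
    def toggle_collapse(n_clicks, is_open):
        return toggle(n_clicks, is_open)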

def store_dataframes(var_files):
    '''
    Read whitespace-delimited output files into records, keyed by their
    incoming index (header and units rows are skipped)
    '''
    dfs = []
    for idx, file_path in var_files.items():
        if file_path == 'None':
            dfs.append({idx: []})
            continue
        df = pd.read_csv(file_path, skiprows=[0,1,2,3,4,5,7], sep=r'\s+')
        dfs.append({idx: df.to_dict('records')})

    return dfs


def get_file_info(file_path):
    '''
    Collect basic file metadata: name, absolute path, size in MB, and
    creation/modification times (epoch seconds)
    '''
    file_name = file_path.split('/')[-1]
    file_abs_path = os.path.abspath(file_path)
    file_size = round(os.path.getsize(file_path) / (1024**2), 2)
    creation_time = os.path.getctime(file_path)
    modification_time = os.path.getmtime(file_path)

    file_info = {
        'file_name': file_name,
        'file_abs_path': file_abs_path,
        'file_size': file_size,
        'creation_time': creation_time,
        'modification_time': modification_time
    }

    return file_info


def find_file_path_from_tree(nested_dict, filename, prepath=()):
    '''
    Yield the key-path(s) to a filename within a nested directory tree;
    works for multi-keyed files.
    Sample outputs: ('outputDirStructure', 'sample_test') ('outputDirStructure', 'sample_multi')
    '''
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == filename:
            yield path + (v, )
        elif isinstance(v, list) and filename in v:
            yield path + (filename, )
        elif hasattr(v, 'items'):
            yield from find_file_path_from_tree(v, filename, path)

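A usage sketch on a small tree (the structure is illustrative):

    tree = {'outputDirStructure': {'sample_test': ['log_opt.sql'], 'sample_multi': {}}}
    for path in find_file_path_from_tree(tree, 'log_opt.sql'):
        print(path)  # ('outputDirStructure', 'sample_test', 'log_opt.sql')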

def find_iterations(nested_dict, prepath=()):
    '''
    Yield the iteration numbers found among 'iteration_*' keys of a nested
    directory tree
    '''
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if 'iteration' in k:
            yield int(re.findall(r'\d+', k)[0])
        elif hasattr(v, 'items'):
            yield from find_iterations(v, path)

def update_yaml(input_dict, yaml_filepath):
×
626
    with open(yaml_filepath, 'w') as outfile:
×
627
        yaml.dump(input_dict, outfile, default_flow_style=False)
×
628

629

630
def read_cost_variables(labels, refturb_variables):
×
631
    # Read tcc cost-related variables from CSV file
632

UNCOV
633
    cost_matrix = [['Main Turbine Components', 'Cost']]
×
634

635
    for l in labels:
×
636
        cost_matrix.append([l, eval(refturb_variables[f'tcc.{l}_cost']['values'])[0]])
×
637

UNCOV
638
    return cost_matrix
×
639

640

def generate_raft_img(raft_design_dir, plot_dir, log_data):
    '''
    Temporary function to visualize raft 3d plot using matplotlib.
    TODO: to build interactive 3d plot using plotly
    '''
    n_plots = len(os.listdir(raft_design_dir))
    print('n_plots: ', n_plots)
    os.makedirs(plot_dir, exist_ok=True)

    opt_outs = {}
    opt_outs['max_pitch'] = np.squeeze(np.array(log_data['raft.Max_PtfmPitch']))

    for i_plot in range(n_plots):
        # Set up subplots
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        ax = plt.axes(projection='3d')

        with open(os.path.join(raft_design_dir, f'raft_design_{i_plot}.pkl'), 'rb') as f:
            design = pickle.load(f)

        # TODO: Found typo on gamma value at 1_raft_opt example
        if design['turbine']['tower']['gamma'] == np.array([0.]):
            design['turbine']['tower']['gamma'] = 0.0       # Change it from array([0.])

        # set up the model
        model1 = raft.Model(design)
        model1.analyzeUnloaded(
            ballast=False,
            heave_tol=1.0
            )

        # tilt the platform to the recorded maximum pitch for this iteration
        model1.fowtList[0].r6[4] = np.radians(opt_outs['max_pitch'][i_plot])

        _, ax = model1.plot(ax=ax)

        # fixed camera angle and limits so successive frames line up
        ax.azim = -88.63636363636361
        ax.elev = 27.662337662337674
        ax.set_xlim3d((-110.90447789470043, 102.92063063344857))
        ax.set_ylim3d((64.47420067304586, 311.37818252335893))
        ax.set_zlim3d((-88.43591080818854, -57.499893019459606))

        image_filename = os.path.join(plot_dir, f'ptfm_{i_plot}.png')
        plt.savefig(image_filename, bbox_inches='tight')
        print('saved ', image_filename)
        # close the figure so it is not kept in memory across iterations
        plt.close(fig)