WISDEM / WEIS, build 10927734534

18 Sep 2024 06:09PM UTC coverage: 79.235% (-0.4%) from 79.668%

Pull Request #315: WEIS 1.3.1 (merge 397ba5241 into f779fa594)

21 of 180 new or added lines in 4 files covered. (11.67%)

8 existing lines in 4 files now uncovered.

21647 of 27320 relevant lines covered (79.23%)

0.79 hits per line

Source File: /weis/visualization/utils.py

'''
Various functions to help visualize WEIS outputs
'''
from weis.aeroelasticse.FileTools import load_yaml
import pandas as pd
import numpy as np
import openmdao.api as om
import glob
import json
import multiprocessing as mp
import plotly.graph_objects as go
import os
import io
import yaml
import re
import socket
from dash import html
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import pickle
import raft
from raft.helpers import *
from weis.dtqpy import objective

try:
    import ruamel_yaml as ry
except Exception:
    try:
        import ruamel.yaml as ry
    except Exception:
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')


def checkPort(port, host="0.0.0.0"):
    # check port availability by trying to bind to it, then close the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((host, port))
        result = True
    except OSError:
        result = False

    sock.close()
    return result

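# Usage sketch (illustrative only): pick a free port before launching the
# visualization server. The port number and the Dash `app` object here are
# assumptions for the example, not part of this module.
#
#     port = 8050
#     if checkPort(port):
#         app.run(host="0.0.0.0", port=port)
#     else:
#         print(f"port {port} is already in use")
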
def parse_yaml(file_path):
    '''
    Parse the yaml file contents into dictionary format
    '''
    try:
        with io.open(file_path, 'r') as stream:
            data = yaml.safe_load(stream)

        data['yamlPath'] = file_path
        return data

    except FileNotFoundError:
        print('Could not locate the input yaml file..')
        exit()

    except Exception as e:
        print(e)
        exit()

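# Usage sketch (illustrative only; the file name is an assumption):
#
#     config = parse_yaml('vizInputFile.yaml')
#     print(config['yamlPath'])    # the source path is stored back onto the dict
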
def dict_to_html(data, out_html_list, level):
    '''
    Render the nested dictionary data as a flat list of html components
    '''

    for k1, v1 in data.items():
        if k1 not in ['dirs', 'files']:
            if not isinstance(v1, (list, dict)):
                out_html_list.append(html.H6(f'{"---"*level}{k1}: {v1}'))
                continue

            out_html_list.append(html.H6(f'{"---"*level}{k1}'))

        if isinstance(v1, list):
            out_html_list.append(html.Div([
                                    html.H6(f'{"---"*(level+1)}{i}') for i in v1]))

        elif isinstance(v1, dict):
            out_html_list = dict_to_html(v1, out_html_list, level+1)

    return out_html_list

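# Usage sketch (illustrative only; the nested dict is a made-up example).
# Nesting depth is rendered as a repeated '---' prefix, one per level.
#
#     components = dict_to_html({'userPreferences': {'plotStyle': 'line'}}, [], level=0)
#     layout = html.Div(components)
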
def read_cm(cm_file):
    """
    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : The file path for the case matrix

    Returns
    -------
    cm : The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    cnames = []
    for c in list(cm_dict.keys()):
        if isinstance(c, ry.comments.CommentedKeySeq):
            cnames.append(tuple(c))
        else:
            cnames.append(c)
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm

def parse_contents(data):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except Exception:
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df

def load_vars_file(fn_vars):
    """
    load a json file of problem variables as output from WEIS (as problem_vars.json)

    parameters:
    -----------
    fn_vars: str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    rawvars = load_yaml(fn_vars)
    vars = {}
    for k, v in rawvars.items():
        for (_, v2) in v:
            for k3, v3 in v2.items():
                if k3 in ["lower", "upper"]:
                    v2[k3] = float(v3)
                if k3 == "val":
                    v2[k3] = np.array(v3)
        vars[k] = dict(v)
    return vars

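# Usage sketch (illustrative only; the path is an assumption). The returned
# dict is keyed by variable group; 'objectives' and 'constraints' are the
# groups consumed elsewhere in this module.
#
#     vars_dict = load_vars_file('outputs/problem_vars.json')
#     for name, spec in vars_dict['constraints'].items():
#         print(name, spec.get('lower'), spec.get('upper'))
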
def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    compare openmdao data dictionaries to find the in-common (and not) keys

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between second and first OM data dicts
    """

    diff_keys_12 = set(dataOM_1).difference(dataOM_2)
    diff_keys_21 = set(dataOM_2).difference(dataOM_1)
    if len(diff_keys_12):
        if verbose:
            print(f"the following keys are only in {fn_1}:")
    for key_m in diff_keys_12:
        if verbose:
            print(f"\t{key_m}")
    if len(diff_keys_21):
        if verbose:
            print(f"the following keys are only in {fn_2}:")
    for key_m in diff_keys_21:
        if verbose:
            print(f"\t{key_m}")
    keys_all = set(dataOM_1).intersection(dataOM_2)
    if verbose:
        print(f"the following keys are in both {fn_1} and {fn_2}:")
    for key_m in keys_all:
        if verbose:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21

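# Usage sketch (illustrative only; the log file names are assumptions):
#
#     dataOM_1 = load_OMsql('log_opt.sql')        # load_OMsql is defined below
#     dataOM_2 = load_OMsql('log_opt_prev.sql')
#     common, only_1, only_2 = compare_om_data(dataOM_1, dataOM_2, verbose=True)
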
def load_OMsql(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
        log : str
            filename of the log sql database that should be loaded
        parse_multi : bool
            switch to turn on rank/iteration parsing and storage
        meta : str
            filename of the meta log sql database that should be loaded
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        rec_data: dict
            dictionary of the data recorded by openMDAO

    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            rankNo = case.name.split(":")[0]
            assert rankNo.startswith("rank")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():

            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []

            if hasattr(case[key], '__len__') and len(case[key]) != 1:
                # convert to a numpy array and add the data to the list
                rec_data[key].append(np.array(case[key]))
            else:
                rec_data[key].append(case[key])

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:  # for each key in the outputs
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data  # return the output

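# Usage sketch (illustrative only; the log file name is an assumption):
#
#     rec_data = load_OMsql('outputs/log_opt.sql')
#     for key, vals in rec_data.items():
#         print(key, len(vals))    # one list entry per recorded driver case
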
def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi=True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string
    opt_logs = sorted(
        glob.glob(log_fmt),
        key=lambda v: int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        raise FileExistsError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # settle on a meta logfile: an explicit argument wins, then a discovered file
    if meta_in is not None:
        meta = meta_in  # if a meta is given, override
    elif meta_found is not None:
        meta = meta_found  # if a meta is not given but one is found, use that
    else:
        meta = None  # otherwise, run without a meta

    # extract the ranks from the sql files
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # run multiprocessing
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load sql file
        outdata = pool.starmap(load_OMsql, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else:  # no multiprocessing
        outdata = [load_OMsql(log, parse_multi=True, verbose=verbose, meta=meta) for log in opt_logs]

    # collect the per-rank data into a single dictionary, flattening scalars
    collected_data = {}
    ndarray_keys = []
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank":  # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")

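# Usage sketch (illustrative only; the glob pattern is an assumption). The
# format string should match the per-rank logs, e.g. log_opt.sql_0,
# log_opt.sql_1, ..., and any log_opt.sql_meta file is split off and used as
# the metadata database.
#
#     data_multi = load_OMsql_multi('outputs/log_opt.sql_*')
#     print(data_multi['rank'][:5], data_multi['iter'][:5])
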
def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict:
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis
    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    dfOMmulti = pd.DataFrame(dataOMmulti)
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp: grp.loc[grp[objective_name].idxmin()],
        include_groups=False,
    ).to_dict()

    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE

def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict:
            experiment design variables for checking
        feas_tol : float (optional)
            relative tolerance applied to the constraint bounds

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise feasibility indications for that constraint
    """

    # assumes a single objective; multi-objective is not handled yet
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    feasibility_constraintwise = dict()
    total_feasibility = np.ones_like(np.array(dataOM[objective_name]).reshape(-1,1), dtype=bool)
    for k, v in vars_dict["constraints"].items():
        feasibility = np.ones_like(dataOM[objective_name], dtype=bool).reshape(-1, 1)
        values = np.array(dataOM[v["name"]])
        if len(values.shape) == 1:
            values = values.reshape(-1,1)
        if v.get("upper") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.less_equal(values, (1+feas_tol)*v["upper"]), axis=1).reshape(-1, 1))
        if v.get("lower") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.greater_equal(values, (1-feas_tol)*v["lower"]), axis=1).reshape(-1, 1))
        feasibility_constraintwise[v["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)
    return total_feasibility, feasibility_constraintwise

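# Usage sketch (illustrative only; the file names are assumptions): combine a
# recorded optimization history with its problem variables to find the subset
# of feasible iterations.
#
#     dataOM = load_OMsql('outputs/log_opt.sql')
#     vars_dict = load_vars_file('outputs/problem_vars.json')
#     tfeas, cfeas = get_feasible_iterations(dataOM, vars_dict, feas_tol=1e-5)
#     print(f'{tfeas.sum()} of {tfeas.size} iterations are feasible')
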
def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies format of DVs, constraints, objective variable file
    guarantees a list of experiments has the same variables
    adjusts unbounded constraints
    returns verified list of vars
    """

    for vars_2 in vars_i:
        if vars_2 is not None:
            for k0 in set(vars_1.keys()).union(vars_2):
                assert k0 in vars_1
                assert k0 in vars_2
                for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                    assert k1 in vars_1[k0]
                    assert k1 in vars_2[k0]
                    for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                        assert k2 in vars_1[k0][k1]
                        assert k2 in vars_2[k0][k1]
                        if k2 == "val":
                            continue
                        if isinstance(vars_1[k0][k1][k2], str):
                            assert vars_1[k0][k1][k2] == vars_2[k0][k1][k2]
                        elif vars_1[k0][k1][k2] is not None:
                            assert np.all(np.isclose(vars_1[k0][k1][k2], vars_2[k0][k1][k2]))
                        else:
                            assert (vars_1[k0][k1][k2] is None) and (vars_2[k0][k1][k2] is None)

    vars_unified = vars_1.copy()
    for k0 in vars_unified.keys():
        for k1 in vars_unified[k0].keys():
            if (vars_unified[k0][k1].get("lower") is not None) and (vars_unified[k0][k1].get("lower") < -1e28):
                vars_unified[k0][k1]["lower"] = -np.inf
            if (vars_unified[k0][k1].get("upper") is not None) and (vars_unified[k0][k1].get("upper") > 1e28):
                vars_unified[k0][k1]["upper"] = np.inf

    return vars_unified

def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or unknown
    (??)
    """

    # print them nicely
    print()
    for key in keys_all:
        if key in keys_obj:
            print(f"** {key}")
        elif key in keys_DV:
            print(f"-- {key}")
        elif key in keys_constr:
            print(f"<> {key}")
        else:
            print(f"?? {key}")
    print()

def read_per_iteration(iteration, stats_paths):
    '''
    Locate and load the summary statistics for a single optimization iteration
    '''
    stats_path_matched = [x for x in stats_paths if f'iteration_{iteration}' in x][0]
    iteration_path = '/'.join(stats_path_matched.split('/')[:-1])
    stats = pd.read_pickle(stats_path_matched)
    # dels = pd.read_pickle(iteration_path+'/DELs.p')
    # fst_vt = pd.read_pickle(iteration_path+'/fst_vt.p')
    print('stats path for iteration', iteration, ':', stats_path_matched)

    return stats, iteration_path

def get_timeseries_data(run_num, stats, iteration_path):
    '''
    Load the OpenFAST timeseries pickle corresponding to one run of an iteration
    '''
    stats = stats.reset_index()     # make an 'index' column with elements like 'IEA_22_Semi_00', ...
    # the naming conventions differ: stats uses e.g. IEA_22_Semi_83 while the
    # timeseries directory uses e.g. IEA_22_Semi_0_83.p
    filename = stats.loc[run_num, 'index'].to_string()
    if filename.split('_')[-1].startswith('0'):
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1][1:]+'.p').strip()
    else:
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1]+'.p').strip()

    # e.g. visualization_demo/openfast_runs/rank_0/iteration_0/timeseries/IEA_22_Semi_0_0.p
    timeseries_path = '/'.join([iteration_path, 'timeseries', filename])
    timeseries_data = pd.read_pickle(timeseries_path)

    return filename, timeseries_data

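# Usage sketch (illustrative only; the glob pattern and indices are
# assumptions): locate the per-iteration summary stats, load one iteration,
# then pull a single run's OpenFAST timeseries from it.
#
#     stats_paths = glob.glob('outputs/**/summary_stats.p', recursive=True)
#     stats, it_path = read_per_iteration(0, stats_paths)
#     fname, ts = get_timeseries_data(0, stats, it_path)
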
def empty_figure():
    '''
    Draw an empty figure to show before anything has been initialized
    '''
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(template=None)
    fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)
    fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)

    return fig

def toggle(click, is_open):
    # flip an open/closed state on click (dash callback helper)
    if click:
        return not is_open
    return is_open

def store_dataframes(var_files):
    dfs = []
    for idx, file_path in var_files.items():
        if file_path == 'None':
            dfs.append({idx: []})
            continue
        # skip the output header rows, keeping the channel-name row
        df = pd.read_csv(file_path, skiprows=[0,1,2,3,4,5,7], sep=r'\s+')
        dfs.append({idx: df.to_dict('records')})

    return dfs

def get_file_info(file_path):
    file_name = file_path.split('/')[-1]
    file_abs_path = os.path.abspath(file_path)
    file_size = round(os.path.getsize(file_path) / (1024**2), 2)    # size in MB
    creation_time = os.path.getctime(file_path)
    modification_time = os.path.getmtime(file_path)

    file_info = {
        'file_name': file_name,
        'file_abs_path': file_abs_path,
        'file_size': file_size,
        'creation_time': creation_time,
        'modification_time': modification_time
    }

    return file_info

def find_file_path_from_tree(nested_dict, filename, prepath=()):
    # Works for multi-keyed files
    # Sample outputs: ('outputDirStructure', 'sample_test') ('outputDirStructure', 'sample_multi')
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == filename:
            yield path + (v, )
        elif isinstance(v, list) and filename in v:
            yield path + (filename, )
        elif hasattr(v, 'items'):
            yield from find_file_path_from_tree(v, filename, path)


def find_iterations(nested_dict, prepath=()):
    # Yield the iteration numbers found anywhere in the nested directory tree
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if 'iteration' in k:
            yield int(re.findall(r'\d+', k)[0])
        elif hasattr(v, 'items'):
            yield from find_iterations(v, path)

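# Usage sketch (illustrative only; the tree is a made-up example). Both
# helpers are generators, so wrap them in list() or sorted() to materialize
# their results.
#
#     tree = {'outputs': {'iteration_0': {'files': ['log_opt.sql']}}}
#     print(sorted(find_iterations(tree)))                          # [0]
#     print(list(find_file_path_from_tree(tree, 'log_opt.sql')))
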
def update_yaml(input_dict, yaml_filepath):
    with open(yaml_filepath, 'w') as outfile:
        yaml.dump(input_dict, outfile, default_flow_style=False)

def read_cost_variables(labels, refturb_variables):
    # Read tcc cost-related variables from the CSV-derived variables table

    cost_matrix = [['Main Turbine Components', 'Cost']]

    for l in labels:
        # each 'values' entry is a stringified list; eval recovers the numbers
        cost_matrix.append([l, eval(refturb_variables[f'tcc.{l}_cost']['values'])[0]])

    return cost_matrix

def generate_raft_img(raft_design_dir, plot_dir, log_data):
    '''
    Temporary function to visualize the raft 3d plot using matplotlib.
    TODO: build an interactive 3d plot using plotly
    '''
    n_plots = len(os.listdir(raft_design_dir))
    print('n_plots: ', n_plots)
    os.makedirs(plot_dir, exist_ok=True)

    opt_outs = {}
    opt_outs['max_pitch'] = np.squeeze(np.array(log_data['raft.Max_PtfmPitch']))

    for i_plot in range(n_plots):
        # Set up subplots
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        ax = plt.axes(projection='3d')

        with open(os.path.join(raft_design_dir, f'raft_design_{i_plot}.pkl'), 'rb') as f:
            design = pickle.load(f)

        # TODO: found a typo on the gamma value in the 1_raft_opt example
        if design['turbine']['tower']['gamma'] == np.array([0.]):
            design['turbine']['tower']['gamma'] = 0.0       # change it from array([0.])

        # set up the model
        model1 = raft.Model(design)
        model1.analyzeUnloaded(
            ballast=False,
            heave_tol=1.0
            )

        model1.fowtList[0].r6[4] = np.radians(opt_outs['max_pitch'][i_plot])

        _, ax = model1.plot(ax=ax)

        ax.azim = -88.63636363636361
        ax.elev = 27.662337662337674
        ax.set_xlim3d((-110.90447789470043, 102.92063063344857))
        ax.set_ylim3d((64.47420067304586, 311.37818252335893))
        ax.set_zlim3d((-88.43591080818854, -57.499893019459606))

        image_filename = os.path.join(plot_dir, f'ptfm_{i_plot}.png')
        plt.savefig(image_filename, bbox_inches='tight')
        print('saved ', image_filename)
        plt.close(fig)  # close the figure so repeated calls don't accumulate open figures