
WISDEM / WEIS · build 11732463081

07 Nov 2024 10:33PM UTC · coverage: 78.73% (-0.4%) from 79.083%

Pull Request #308: DLC Generation - Refactor and New Cases
Merge 754c2dcc4 into f779fa594 (github · web-flow)

467 of 677 new or added lines in 11 files covered (68.98%)
389 existing lines in 9 files now uncovered
21406 of 27189 relevant lines covered (78.73%)
0.79 hits per line

Source file: /weis/visualization/utils.py (0.0% of lines covered)
'''
Various helper functions for visualizing WEIS outputs
'''
from weis.aeroelasticse.FileTools import load_yaml
import pandas as pd
import numpy as np
import openmdao.api as om
import glob
import json
import multiprocessing as mp
import plotly.graph_objects as go
import os
import io
import yaml
import re
import socket
from dash import html
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import pickle
import raft
from raft.helpers import *

try:
    import ruamel_yaml as ry
except Exception:
    try:
        import ruamel.yaml as ry
    except Exception:
        raise ImportError('No module named ruamel.yaml or ruamel_yaml')

def checkPort(port, host="0.0.0.0"):
    # check port availability by binding to it, then close the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = False
    try:
        sock.bind((host, port))
        result = True
    except OSError:
        result = False

    sock.close()
    return result
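# A minimal usage sketch for checkPort (hypothetical port value, shown for
# illustration only; it is not executed by this module):
#
#   >>> PORT = 8050
#   >>> checkPort(PORT)     # True if we could bind, i.e. the port is free
#   True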

def parse_yaml(file_path):
    '''
    Parse the yaml file contents into a dictionary
    '''
    try:
        with io.open(file_path, 'r') as stream:
            data = yaml.safe_load(stream)

        data['yamlPath'] = file_path
        return data

    except FileNotFoundError:
        print('Could not locate the input yaml file..')
        exit()

    except Exception as e:
        print(e)
        exit()

def dict_to_html(data, out_html_list, level):
    '''
    Render the nested dictionary data as a list of html components
    '''

    for k1, v1 in data.items():
        if k1 not in ['dirs', 'files']:
            if not isinstance(v1, (list, dict)):
                out_html_list.append(html.H6(f'{"---"*level}{k1}: {v1}'))
                continue

            out_html_list.append(html.H6(f'{"---"*level}{k1}'))

        if isinstance(v1, list):
            out_html_list.append(html.Div([
                html.H6(f'{"---"*(level+1)}{i}') for i in v1]))

        elif isinstance(v1, dict):
            out_html_list = dict_to_html(v1, out_html_list, level+1)

    return out_html_list
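# A minimal usage sketch for dict_to_html (hypothetical input dict; the helper
# returns a flat list of dash html.H6/html.Div components, indented with '---'
# per nesting level):
#
#   >>> cfg = {'userOptions': {'mode': 'line', 'channels': ['Wind1VelX', 'GenPwr']}}
#   >>> components = dict_to_html(cfg, [], level=0)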

def read_cm(cm_file):
    """
    Function originally from:
    https://github.com/WISDEM/WEIS/blob/main/examples/16_postprocessing/rev_DLCs_WEIS.ipynb

    Parameters
    ----------
    cm_file : The file path for the case matrix

    Returns
    -------
    cm : The dataframe of the case matrix
    """
    cm_dict = load_yaml(cm_file, package=1)
    cnames = []
    for c in list(cm_dict.keys()):
        if isinstance(c, ry.comments.CommentedKeySeq):
            cnames.append(tuple(c))
        else:
            cnames.append(c)
    cm = pd.DataFrame(cm_dict, columns=cnames)

    return cm
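# A minimal usage sketch for read_cm (hypothetical path; a WEIS case-matrix
# yaml whose sequence-like keys become tuple column names in the dataframe):
#
#   >>> cm = read_cm('outputs/openfast_runs/case_matrix.yaml')
#   >>> cm.columns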

def parse_contents(data):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    collected_data = {}
    for key in data.keys():
        if key not in collected_data.keys():
            collected_data[key] = []

        for key_idx, _ in enumerate(data[key]):
            if isinstance(data[key][key_idx], int):
                collected_data[key].append(np.array(data[key][key_idx]))
            elif len(data[key][key_idx]) == 1:
                try:
                    collected_data[key].append(np.array(data[key][key_idx][0]))
                except Exception:
                    collected_data[key].append(np.array(data[key][key_idx]))
            else:
                collected_data[key].append(np.array(data[key][key_idx]))

    df = pd.DataFrame.from_dict(collected_data)

    return df

def load_vars_file(fn_vars):
    """
    load a json file of problem variables as output from WEIS (as problem_vars.json)

    parameters:
    -----------
    fn_vars: str
        a filename to read

    returns:
    --------
    vars : dict[dict]
        a dictionary of dictionaries holding the problem_vars from WEIS
    """

    with open(fn_vars, "r") as fjson:
        # unpack in a useful form
        vars = {k: dict(v) for k, v in json.load(fjson).items()}
    return vars
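# A minimal usage sketch for load_vars_file (hypothetical path; the returned
# dict includes the 'objectives' and 'constraints' groups consumed by
# get_feasible_iterations below):
#
#   >>> vars_dict = load_vars_file('outputs/problem_vars.json')
#   >>> sorted(vars_dict.keys())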

def compare_om_data(
    dataOM_1,
    dataOM_2,
    fn_1="data 1",
    fn_2="data 2",
    verbose=False,
):
    """
    compare openmdao data dictionaries to find the in-common (and not) keys

    args:
        dataOM_1: dict
            an openmdao data dictionary
        dataOM_2: dict
            an openmdao data dictionary
        fn_1: str (optional)
            display name for the first data dictionary
        fn_2: str (optional)
            display name for the second data dictionary
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        keys_all: set
            intersection (i.e. common) keys between the two OM data dictionaries
        diff_keys_12: set
            directional difference of keys between the first and second OM data dicts
        diff_keys_21: set
            directional difference of keys between the second and first OM data dicts
    """

    diff_keys_12 = set(dataOM_1).difference(dataOM_2)
    diff_keys_21 = set(dataOM_2).difference(dataOM_1)
    keys_all = set(dataOM_1).intersection(dataOM_2)

    if verbose:
        if diff_keys_12:
            print(f"the following keys are only in {fn_1}:")
        for key_m in diff_keys_12:
            print(f"\t{key_m}")
        if diff_keys_21:
            print(f"the following keys are only in {fn_2}:")
        for key_m in diff_keys_21:
            print(f"\t{key_m}")
        print(f"the following keys are in both {fn_1} and {fn_2}:")
        for key_m in keys_all:
            print(f"\t{key_m}")

    return keys_all, diff_keys_12, diff_keys_21

def load_OMsql(log):
    """
    Function from:
    https://github.com/WISDEM/WEIS/blob/main/examples/09_design_of_experiments/postprocess_results.py
    """
    # logging.info("loading ", log)
    cr = om.CaseReader(log)
    rec_data = {}
    cases = cr.get_cases('driver')
    for case in cases:
        for key in case.outputs.keys():
            if key not in rec_data:
                rec_data[key] = []
            rec_data[key].append(case[key])

    return rec_data

def load_OMsql_temp(
    log,
    parse_multi=False,
    meta=None,
    verbose=False,
):
    """
    load the openmdao sql file produced by a WEIS run into a dictionary

    parameters:
    -----------
        log : str
            filename of the log sql database that should be loaded
        parse_multi : bool
            switch to turn on rank/iteration parsing and storage
        meta : str
            filename of the meta log sql database that should be loaded
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
        rec_data: dict
            dictionary of the data recorded by openMDAO
    """

    # heads-up print
    if verbose:
        print(f"loading {log}")

    # create an openmdao reader for recorded output data
    cr = om.CaseReader(log, metadata_filename=meta)

    # create a dict for output data that's been recorded
    rec_data = {}
    # loop over the cases
    for case in cr.get_cases("driver"):
        if parse_multi:
            # recover the rank and iteration numbers from the case name
            rankNo = case.name.split(":")[0]
            assert rankNo.startswith("rank")
            rankNo = int(rankNo[4:])
            iterNo = int(case.name.split("|")[-1])

        # for each key in the outputs
        for key in case.outputs.keys():

            if key not in rec_data:
                # if this key isn't present, create a new list
                rec_data[key] = []

            if hasattr(case[key], '__len__'):
                if len(case[key]) == 1:
                    # single-element arrays: coerce to float and add to the list
                    rec_data[key].append(float(case[key]))
                else:
                    # longer arrays: add as a numpy array
                    rec_data[key].append(np.array(case[key]))
            else:
                # scalars: add as-is
                rec_data[key].append(case[key])

        if parse_multi:
            # add rank/iter metadata
            for key in ["rank", "iter"]:
                if key not in rec_data:  # if this key isn't present, create a new list
                    rec_data[key] = []
            rec_data["rank"].append(rankNo)
            rec_data["iter"].append(iterNo)

    return rec_data  # return the output

def load_OMsql_multi(
    log_fmt,
    meta_in=None,
    process_multi=True,
    verbose=False,
):
    """
    load the multi-processor openmdao sql files produced by WEIS into a dict

    parameters:
    -----------
        log_fmt : str
            format string for the process-wise WEIS/OM log files
        meta_in : str (optional, default: None)
            filename string of the meta log file (will override automatic discovery)
        process_multi : bool (optional, default: True)
            postprocess in parallel using the multiprocessing library
        verbose : bool (optional, default: False)
            if we want to print what's happening

    returns:
    --------
        data_dict : dict
            dictionary of all the datapoints extracted from the WEIS/OM log files
    """

    # use glob to find the logs that match the format string
    opt_logs = sorted(
        glob.glob(log_fmt),
        key = lambda v : int(v.split("_")[-1])
            if (v.split("_")[-1] != "meta")
            else 1e8,
    )
    if len(opt_logs) < 1:
        raise FileNotFoundError("No output logs to postprocess!")

    # remove the "meta" log from the collection
    meta_found = None
    for idx, log in enumerate(opt_logs):
        if "meta" in log:
            meta_found = log  # save the meta file
            opt_logs.pop(idx)  # remove the meta log from the list
            break

    # resolve which meta logfile, if any, to use
    if meta_in is not None:
        meta = meta_in  # if a meta is given, override
    elif meta_found is not None:
        meta = meta_found  # if a meta is not given but one is found, use that
    else:
        meta = None  # otherwise, run without a meta

    # extract the ranks from the sql files
    sql_ranks = [ol.split("_")[-1] for ol in opt_logs]

    # run multiprocessing
    if process_multi:
        cores = mp.cpu_count()
        pool = mp.Pool(min(len(opt_logs), cores))

        # load the sql files in parallel
        outdata = pool.starmap(load_OMsql_temp, [(log, True, meta, verbose) for log in opt_logs])
        pool.close()
        pool.join()
    else: # no multiprocessing
        outdata = [load_OMsql_temp(log, parse_multi=True, meta=meta, verbose=verbose) for log in opt_logs]

    # collect the data and turn it into a dataframe for convenience
    collected_data = {}
    ndarray_keys = []
    for sql_rank, data in zip(sql_ranks, outdata):
        for key in data.keys():
            if key not in collected_data.keys():
                collected_data[key] = []
            if key == "rank": # adjust the rank based on sql file rank
                data[key] = [int(sql_rank) for _ in data[key]]
            for idx_key, _ in enumerate(data[key]):
                if isinstance(data[key][idx_key], int):
                    collected_data[key].append(int(np.array(data[key][idx_key])))
                elif isinstance(data[key][idx_key], float):
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                elif len(data[key][idx_key]) == 1:
                    collected_data[key].append(float(np.array(data[key][idx_key])))
                else:
                    collected_data[key].append(np.array(data[key][idx_key]).tolist())
                    ndarray_keys.append(key)
    df = pd.DataFrame(collected_data)

    # return a dictionary of the data that was extracted
    return df.to_dict(orient="list")
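# A minimal usage sketch for load_OMsql_multi (hypothetical file pattern; it
# expects per-rank logs ending in _0, _1, ... plus an optional _meta file):
#
#   >>> data = load_OMsql_multi('outputs/log_opt.sql_*', verbose=True)
#   >>> data['rank'][:3], data['iter'][:3]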

def consolidate_multi(
    dataOMmulti,
    vars_dict,
    feas_tol=1e-5,
):
    """
    load the multi-processor openmdao sql files and squash them to the
    per-iteration best-feasible result

    parameters:
    -----------
        dataOMmulti : dict
            dictionary of all the datapoints extracted from the multiprocess
            WEIS/OM log files
        vars_dict : dict
            experiment design variables to be analyzed
        feas_tol : float (optional)
            tolerance for feasibility analysis

    returns:
    --------
        dataOMbest_DE : dict
            dictionary of the per-iteration best-feasible simulations
    """

    dfOMmulti = pd.DataFrame(dataOMmulti)
    tfeas, cfeas = get_feasible_iterations(dataOMmulti, vars_dict, feas_tol=feas_tol)

    dfOMmulti = dfOMmulti[tfeas].reset_index()

    dataOMbest_DE = dfOMmulti.groupby("iter").apply(
        lambda grp : grp.loc[grp["floatingse.system_structural_mass"].idxmin()],
        include_groups=False,
    ).to_dict()

    for key in dataOMbest_DE.keys():
        dataOMbest_DE[key] = np.array(list(dataOMbest_DE[key].values()))

    return dataOMbest_DE

def get_feasible_iterations(
    dataOM,
    vars_dict,
    feas_tol=1e-5,
):
    """
    get iteration-wise total and per-constraint feasibility from an experiment

    args:
        dataOM: dict
            openmdao data dictionary
        vars_dict: dict
            experiment design variables for checking
        feas_tol: float (optional)
            tolerance for feasibility analysis

    returns:
        total_feasibility: np.ndarray[bool]
            iteration-wise total feasibility indications
        feasibility_constraintwise: dict[np.ndarray[bool]]
            dictionary to map from constraint names to iteration-wise
            feasibility indications for that constraint
    """

    # assert len(vars_dict["objectives"].values()) == 1, "can't handle multi-objective... yet. -cfrontin"
    objective_name = list(vars_dict["objectives"].values())[0]["name"]

    feasibility_constraintwise = dict()
    total_feasibility = np.ones_like(np.array(dataOM[objective_name]).reshape(-1,1), dtype=bool)
    for k, v in vars_dict["constraints"].items():
        feasibility = np.ones_like(dataOM[objective_name], dtype=bool).reshape(-1, 1)
        values = np.array(dataOM[v["name"]])
        if len(values.shape) == 1:
            values = values.reshape(-1,1)
        if v.get("upper") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.less_equal(values, (1+feas_tol)*v["upper"]), axis=1).reshape(-1, 1))
        if v.get("lower") is not None:
            feasibility = np.logical_and(feasibility, np.all(np.greater_equal(values, (1-feas_tol)*v["lower"]), axis=1).reshape(-1, 1))
        feasibility_constraintwise[v["name"]] = feasibility
        total_feasibility = np.logical_and(total_feasibility, feasibility)
    return total_feasibility, feasibility_constraintwise
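# A minimal sketch tying the loaders above together (hypothetical paths):
#
#   >>> vars_dict = load_vars_file('outputs/problem_vars.json')
#   >>> dataOM = load_OMsql_multi('outputs/log_opt.sql_*')
#   >>> tfeas, cfeas = get_feasible_iterations(dataOM, vars_dict, feas_tol=1e-5)
#   >>> best = consolidate_multi(dataOM, vars_dict)   # per-iteration best-feasible rows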

def verify_vars(
    vars_1,
    *vars_i,
):
    """
    verifies the format of the DVs, constraints, and objective variable file;
    guarantees that a list of experiments has the same variables;
    adjusts unbounded constraints;
    returns the verified, unified set of vars
    """

    for vars_2 in vars_i:
        if vars_2 is not None:
            for k0 in set(vars_1.keys()).union(vars_2):
                assert k0 in vars_1
                assert k0 in vars_2
                for k1 in set(vars_1[k0].keys()).union(vars_2[k0].keys()):
                    assert k1 in vars_1[k0]
                    assert k1 in vars_2[k0]
                    for k2 in set(vars_1[k0][k1].keys()).union(vars_2[k0][k1].keys()):
                        assert k2 in vars_1[k0][k1]
                        assert k2 in vars_2[k0][k1]
                        if k2 == "val":
                            continue
                        if isinstance(vars_1[k0][k1][k2], str):
                            assert vars_1[k0][k1][k2] == vars_2[k0][k1][k2]
                        elif vars_1[k0][k1][k2] is not None:
                            assert np.all(np.isclose(vars_1[k0][k1][k2], vars_2[k0][k1][k2]))
                        else:
                            assert (vars_1[k0][k1][k2] is None) and (vars_2[k0][k1][k2] is None)

    vars_unified = vars_1.copy()
    for k0 in vars_unified.keys():
        for k1 in vars_unified[k0].keys():
            if (vars_unified[k0][k1].get("lower") is not None) and (vars_unified[k0][k1].get("lower") < -1e28):
                vars_unified[k0][k1]["lower"] = -np.inf
            if (vars_unified[k0][k1].get("upper") is not None) and (vars_unified[k0][k1].get("upper") > 1e28):
                vars_unified[k0][k1]["upper"] = np.inf

    return vars_unified

def prettyprint_variables(
    keys_all,
    keys_obj,
    keys_DV,
    keys_constr,
):
    """
    print the variables we have with a prefix showing whether they are an
    objective variable (**), design variable (--), constraint (<>), or unknown
    (??)
    """

    # print them nicely
    print()
    for key in keys_all:
        if key in keys_obj:
            print(f"** {key}")
        elif key in keys_DV:
            print(f"-- {key}")
        elif key in keys_constr:
            print(f"<> {key}")
        else:
            print(f"?? {key}")
    print()

def read_per_iteration(iteration, stats_paths):

    stats_path_matched = [x for x in stats_paths if f'iteration_{iteration}' in x][0]
    iteration_path = '/'.join(stats_path_matched.split('/')[:-1])
    stats = pd.read_pickle(stats_path_matched)
    # dels = pd.read_pickle(iteration_path+'/DELs.p')
    # fst_vt = pd.read_pickle(iteration_path+'/fst_vt.p')
    print(f'iteration path with {iteration}: {stats_path_matched}')

    return stats, iteration_path

def get_timeseries_data(run_num, stats, iteration_path):

    stats = stats.reset_index()     # make 'index' column that has elements of 'IEA_22_Semi_00, ...'
    # filenames differ between sources - stats: IEA_22_Semi_83 / timeseries/: IEA_22_Semi_0_83.p
    filename = stats.loc[run_num, 'index'].to_string()
    if filename.split('_')[-1].startswith('0'):
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1][1:]+'.p').strip()
    else:
        filename = ('_'.join(filename.split('_')[:-1])+'_0_'+filename.split('_')[-1]+'.p').strip()

    # e.g., visualization_demo/openfast_runs/rank_0/iteration_0/timeseries/IEA_22_Semi_0_0.p
    timeseries_path = '/'.join([iteration_path, 'timeseries', filename])
    timeseries_data = pd.read_pickle(timeseries_path)

    return filename, timeseries_data

def empty_figure():
    '''
    Draw an empty figure that shows nothing once initialized
    '''
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(template=None)
    fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)
    fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)

    return fig

def toggle(click, is_open):
    if click:
        return not is_open
    return is_open

def store_dataframes(var_files):
    dfs = []
    for idx, file_path in var_files.items():
        if file_path == 'None':
            dfs.append({idx: []})
            continue
        df = pd.read_csv(file_path, skiprows=[0,1,2,3,4,5,7], sep=r'\s+')
        dfs.append({idx: df.to_dict('records')})

    return dfs

def get_file_info(file_path):
    file_name = file_path.split('/')[-1]
    file_abs_path = os.path.abspath(file_path)
    file_size = round(os.path.getsize(file_path) / (1024**2), 2)    # size in MB
    creation_time = os.path.getctime(file_path)
    modification_time = os.path.getmtime(file_path)

    file_info = {
        'file_name': file_name,
        'file_abs_path': file_abs_path,
        'file_size': file_size,
        'creation_time': creation_time,
        'modification_time': modification_time
    }

    return file_info

def find_file_path_from_tree(nested_dict, filename, prepath=()):
    # Works for multi-keyed files
    # Sample outputs: ('outputDirStructure', 'sample_test') ('outputDirStructure', 'sample_multi')
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if v == filename:
            yield path + (v, )
        elif isinstance(v, list) and filename in v:
            yield path + (filename, )
        elif hasattr(v, 'items'):
            yield from find_file_path_from_tree(v, filename, path)


def find_iterations(nested_dict, prepath=()):
    for k, v in nested_dict.items():
        path = prepath + (k,)
        if 'iteration' in k:
            yield int(re.findall(r'\d+', k)[0])
        elif hasattr(v, 'items'):
            yield from find_iterations(v, path)
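# A minimal usage sketch for the two generators above (hypothetical tree):
#
#   >>> tree = {'outputDirStructure': {'sample_test': {'files': ['log_opt.sql']},
#   ...                                'iteration_3': {}}}
#   >>> next(find_file_path_from_tree(tree, 'log_opt.sql'))
#   ('outputDirStructure', 'sample_test', 'files', 'log_opt.sql')
#   >>> list(find_iterations(tree))
#   [3]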

def update_yaml(input_dict, yaml_filepath):
    with open(yaml_filepath, 'w') as outfile:
        yaml.dump(input_dict, outfile, default_flow_style=False)

def read_cost_variables(labels, refturb_variables):
    # Read the tcc cost-related variables from the CSV file
    cost_matrix = [['Main Turbine Components', 'Cost']]

    for l in labels:
        cost_matrix.append([l, eval(refturb_variables[f'tcc.{l}_cost']['values'])[0]])

    return cost_matrix

def generate_raft_img(raft_design_dir, plot_dir, log_data):
    '''
    Temporary function to visualize the raft 3d plot using matplotlib.
    TODO: build an interactive 3d plot using plotly
    '''
    n_plots = len(os.listdir(raft_design_dir))
    print('n_plots: ', n_plots)
    os.makedirs(plot_dir, exist_ok=True)

    opt_outs = {}
    opt_outs['max_pitch'] = np.squeeze(np.array(log_data['raft.Max_PtfmPitch']))

    for i_plot in range(n_plots):
        # Set up subplots
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        ax = plt.axes(projection='3d')

        with open(os.path.join(raft_design_dir, f'raft_design_{i_plot}.pkl'), 'rb') as f:
            design = pickle.load(f)

        # TODO: Found typo on gamma value in the 1_raft_opt example
        if design['turbine']['tower']['gamma'] == np.array([0.]):
            design['turbine']['tower']['gamma'] = 0.0       # Change it from array([0.])

        # set up the model
        model1 = raft.Model(design)
        model1.analyzeUnloaded(
            ballast=False,
            heave_tol=1.0,
        )

        # pitch the platform to the recorded maximum pitch for this iteration
        model1.fowtList[0].r6[4] = np.radians(opt_outs['max_pitch'][i_plot])

        _, ax = model1.plot(ax=ax)

        # fix the camera angle and axis limits across frames
        ax.azim = -88.63636363636361
        ax.elev = 27.662337662337674
        ax.set_xlim3d((-110.90447789470043, 102.92063063344857))
        ax.set_ylim3d((64.47420067304586, 311.37818252335893))
        ax.set_zlim3d((-88.43591080818854, -57.499893019459606))

        image_filename = os.path.join(plot_dir, f'ptfm_{i_plot}.png')
        plt.savefig(image_filename, bbox_inches='tight')
        print('saved ', image_filename)