/src/multi_vector_simulator/E1_process_results.py
"""
Module E1 process results
=========================

Module E1 processes the oemof results.
- receive time series per bus for all assets
- write time series to dictionary
- get optimal capacity of optimized assets
- add the evaluation of time series

"""

import logging
import copy
import pandas as pd
from multi_vector_simulator.utils.helpers import reducable_demand_name
from multi_vector_simulator.utils.constants import TYPE_NONE, TOTAL_FLOW
from multi_vector_simulator.utils.constants_json_strings import (
    ECONOMIC_DATA,
    FLOW,
    INSTALLED_CAP,
    INPUT_POWER,
    OUTPUT_POWER,
    STORAGE_CAPACITY,
    TIME_INDEX,
    INFLOW_DIRECTION,
    OUTFLOW_DIRECTION,
    KPI_SCALARS_DICT,
    OPTIMIZED_FLOWS,
    UNIT,
    CURR,
    UNIT_YEAR,
    ENERGY_CONSUMPTION,
    LABEL,
    VALUE,
    OPTIMIZE_CAP,
    SIMULATION_SETTINGS,
    EVALUATED_PERIOD,
    TIMESERIES_PEAK,
    TIMESERIES_TOTAL,
    TIMESERIES_AVERAGE,
    DSO_FEEDIN,
    EXCESS_SINK,
    ENERGY_CONVERSION,
    ENERGY_PRODUCTION,
    ENERGY_STORAGE,
    OEMOF_ASSET_TYPE,
    INVESTMENT_BUS,
    ENERGY_VECTOR,
    KPI,
    KPI_COST_MATRIX,
    KPI_SCALAR_MATRIX,
    TOTAL_FLOW,
    PEAK_FLOW,
    AVERAGE_FLOW,
    OPTIMIZED_ADD_CAP,
    ANNUAL_TOTAL_FLOW,
    COST_OPERATIONAL_TOTAL,
    COST_INVESTMENT,
    COST_DISPATCH,
    COST_OM,
    COST_TOTAL,
    COST_UPFRONT,
    ANNUITY_TOTAL,
    ANNUITY_OM,
    LCOE_ASSET,
    EMISSION_FACTOR,
    TOTAL_EMISSIONS,
    TIMESERIES_SOC,
    KPI_UNCOUPLED_DICT,
    FIX_COST,
    LIFETIME_PRICE_DISPATCH,
    AVERAGE_SOC,
    TYPE_ASSET,
)

# Oemof.solph variables
OEMOF_FLOW = "flow"
OEMOF_SEQUENCES = "sequences"
OEMOF_INVEST = "invest"
OEMOF_SCALARS = "scalars"
OEMOF_STORAGE_CONTENT = "storage_content"

# Determines which assets are defined by...
# a) an influx from a bus
ASSET_GROUPS_DEFINED_BY_INFLUX = [ENERGY_CONSUMPTION]
# b) an outflux into a bus
ASSET_GROUPS_DEFINED_BY_OUTFLUX = [ENERGY_CONVERSION, ENERGY_PRODUCTION]

# Threshold for precision limit:
THRESHOLD = 10 ** (-6)


def cut_below_micro(value, label):
    r"""
    Trims results of the oemof optimization to positive values and rounds them to 0 if they lie within a certain precision threshold (of -10^-6).

    Oemof termination is dependent on the simulation settings of oemof-solph. Thus, it can terminate the optimization if the results are within certain bounds, which can sometimes lead to negative decision variables (capacities, flows). Negative values do not make sense in this context. If the values are between -10^-6 and 0, we assume that they can be rounded to 0, as they result from the precision settings of the solver. In that case the value is overwritten for the further post-processing. This should also avoid SOC timeseries with doubtful values outside of [0, 1]. If any value is more negative than the threshold, its value is not changed but a warning is raised.
    Similarly, if a positive decision variable is detected that has a value lower than the threshold, it is assumed that this only happens because of the solver settings, and the values below the threshold are rounded to 0.

    Parameters
    ----------
    value: float or pd.Series
        Decision variable determined by oemof

    label: str
        String to be mentioned in the debug messages

    Returns
    -------

    value: float or pd.Series
        Decision variable with rounded values in case that slight negative values or positive values were observed.

    Notes
    -----

    Tested with:
    - E1.test_cut_below_micro_scalar_value_below_0_larger_threshold
    - E1.test_cut_below_micro_scalar_value_below_0_smaller_threshold
    - E1.test_cut_below_micro_scalar_value_0
    - E1.test_cut_below_micro_scalar_value_larger_0
    - E1.test_cut_below_micro_scalar_value_larger_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_larger_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_0
    - E1.test_cut_below_micro_pd_Series_larger_0
    - E1.test_cut_below_micro_pd_Series_larger_0_smaller_threshold
    """
    text_block_start = f"The value of {label} is below 0"
    text_block_set_0 = f"Negative value(s) are smaller in magnitude than {THRESHOLD}. This is likely a result of the termination/precision settings of the cbc solver. As the difference is marginal, the value will be set to 0. "
    text_block_oemof = "This is so far below 0 that the value is not changed. All oemof decision variables should be positive, so this needs to be investigated. "

    logging.debug(
        f"Check if the dispatch of asset {label} as per the oemof results is within the defined margin of precision ({THRESHOLD})"
    )

    # flows
    if isinstance(value, pd.Series):
        # Identifies any negative values. Decision variables should not have a negative value
        if (value < 0).any():
            log_msg = text_block_start
            # Counts the incidents in which the value is below 0.
            if isinstance(value, pd.Series):
                instances = sum(value < 0)
                log_msg += f" in {instances} instances. "
            # Checks that all values are at least within the threshold for negative values.
            if (value > -THRESHOLD).all():
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = value.clip(lower=0)
            # If any value has a large negative value (lower than the threshold), no values are changed.
            else:
                test = value.clip(upper=-THRESHOLD).abs()
                log_msg += f"At least one value exceeds the scale of {-THRESHOLD}. The highest negative value is -{max(test)}. "
                log_msg += text_block_oemof
                logging.warning(log_msg)

        # Determine if there are any positive values that are between 0 and the threshold:
        # Clip to interval
        positive_threshold = value.clip(lower=0, upper=THRESHOLD)
        # Determine instances in which bounds are met: 1=either 0 or larger threshold, 0=smaller threshold
        positive_threshold = (positive_threshold == 0) + (
            positive_threshold == THRESHOLD
        )
        # Instances in which values are in determined interval:
        instances = len(value) - sum(positive_threshold)
        if instances > 0:
            logging.debug(
                f"There are {instances} instances in which there are positive values smaller than the threshold."
            )
            # Multiply with positive_threshold (1=either 0 or larger threshold, 0=smaller threshold)
            value = value * positive_threshold

    # capacities
    else:
        # Value is lower than 0, which should not be possible for decision variables
        if value < 0:
            log_msg = text_block_start
            # Value within [-THRESHOLD, 0] = [-10**(-6), 0], i.e. so small that it can be neglected.
            if value > -THRESHOLD:
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = 0
            # Value is below 0 but already large enough that it should not be neglected.
            else:
                log_msg += f"The value exceeds the scale of {-THRESHOLD}, with {value}."
                log_msg += text_block_oemof
                logging.warning(log_msg)
        # Value is above 0 but below threshold, should be rounded
        elif value < THRESHOLD:
            logging.debug(
                f"The positive value {value} is below the {THRESHOLD}, and rounded to 0."
            )
            value = 0

    return value
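

# Illustrative usage sketch (editor's addition, not part of the original module):
# it shows how `cut_below_micro` treats solver noise, assuming a pandas Series in
# which one entry is slightly negative and one is a tiny positive residual, both
# within the precision margin of the cbc solver. The asset labels are invented.
def _example_cut_below_micro():
    # A hypothetical dispatch time series with solver noise around zero
    noisy_flow = pd.Series([0.5, -2e-7, 3e-8, 1.0])
    cleaned = cut_below_micro(noisy_flow, "demo asset")
    # Both near-zero entries are rounded to 0, the regular values are untouched
    assert cleaned.tolist() == [0.5, 0.0, 0.0, 1.0]
    # Scalars (e.g. capacities) are handled the same way
    assert cut_below_micro(-5e-7, "demo capacity") == 0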


def get_timeseries_per_bus(dict_values, bus_data):
    r"""
    Reads simulation results of all busses and stores time series.

    Parameters
    ----------
    dict_values : dict
        Contains all input data of the simulation.
    bus_data : dict
        Contains information about all busses in a nested dict.

        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    Notes
    -----
    Tested with:
    - test_get_timeseries_per_bus_two_timeseries_for_directly_connected_storage()

    #Todo: This is a duplicate of the `E1.get_flow()` assertions, and thus `E1.cut_below_micro` is applied twice for each flow. This should rather be merged into the other functions.

    Returns
    -------
    Indirectly updated `dict_values` with 'optimizedFlows' - one data frame for each bus.

    """
    logging.debug(
        "Time series for plots and 'timeseries.xlsx' are added to `dict_values[OPTIMIZED_FLOWS]` in `E1.get_timeseries_per_bus`; check there in case of problems."
    )
    bus_data_timeseries = {}
    for bus in bus_data.keys():
        bus_data_timeseries.update(
            {bus: pd.DataFrame(index=dict_values[SIMULATION_SETTINGS][TIME_INDEX])}
        )

        # obtain flows that flow into the bus
        to_bus = {
            key[0][0]: key
            for key in bus_data[bus][OEMOF_SEQUENCES].keys()
            if key[0][1] == bus and key[1] == OEMOF_FLOW
        }
        for asset in to_bus:
            flow = bus_data[bus][OEMOF_SEQUENCES][to_bus[asset]]
            flow = cut_below_micro(flow, bus + "/" + asset)
            bus_data_timeseries[bus][asset] = flow
        # obtain flows that flow out of the bus
        from_bus = {
            key[0][1]: key
            for key in bus_data[bus][OEMOF_SEQUENCES].keys()
            if key[0][0] == bus and key[1] == OEMOF_FLOW
        }
        for asset in from_bus:
            try:
                # if `asset` already exists add input/output power to column name
                # (occurs for storages that are directly added to a bus)
                bus_data_timeseries[bus][asset]
                # asset is already in bus_data_timeseries[bus]. Therefore a renaming is necessary:
                bus_data_timeseries[bus].rename(
                    columns={asset: " ".join([asset, OUTPUT_POWER])}, inplace=True
                )
                # Now the "from_bus", i.e. the charging/input power of the storage asset, is added to the data set:
                bus_data_timeseries[bus][" ".join([asset, INPUT_POWER])] = -bus_data[
                    bus
                ][OEMOF_SEQUENCES][from_bus[asset]]
            except KeyError:
                # The asset was not previously added to the `OPTIMIZED_FLOWS`, i.e. it is not a storage asset
                bus_data_timeseries[bus][asset] = -bus_data[bus][OEMOF_SEQUENCES][
                    from_bus[asset]
                ]

    dict_values.update({OPTIMIZED_FLOWS: bus_data_timeseries})
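

# Illustrative sketch (editor's addition): a minimal, hypothetical `bus_data`
# structure with oemof-style keys `((from_node, to_node), "flow")`, used to show
# how `get_timeseries_per_bus` sorts inflows (kept positive) and outflows
# (negated) into one DataFrame per bus. The bus and asset names are invented.
def _example_get_timeseries_per_bus():
    index = pd.date_range("2024-01-01", periods=3, freq="h")
    dict_values = {SIMULATION_SETTINGS: {TIME_INDEX: index}}
    bus_data = {
        "Electricity bus": {
            OEMOF_SEQUENCES: {
                # inflow into the bus (production)
                (("pv_plant", "Electricity bus"), OEMOF_FLOW): pd.Series(
                    [0.0, 1.5, 2.0], index=index
                ),
                # outflow from the bus (consumption)
                (("Electricity bus", "demand_01"), OEMOF_FLOW): pd.Series(
                    [1.0, 1.0, 1.0], index=index
                ),
            }
        }
    }
    get_timeseries_per_bus(dict_values, bus_data)
    df = dict_values[OPTIMIZED_FLOWS]["Electricity bus"]
    # Production shows up with its original sign, consumption is negated
    assert df["pv_plant"].tolist() == [0.0, 1.5, 2.0]
    assert df["demand_01"].tolist() == [-1.0, -1.0, -1.0]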


def get_storage_results(settings, storage_bus, dict_asset):
    r"""
    Reads storage results of simulation and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').
    storage_bus : dict
        Contains information about the storage bus. Information about the scalars
        like investment or initial capacity in key 'scalars' (pd.Series) and the
        flows between the component and the busses in key 'sequences' (pd.DataFrame).
    dict_asset : dict
        Contains information about the storage like capacity, charging power, etc.

    Returns
    -------
    Indirectly updates `dict_asset` with simulation results concerning the
    storage.

    """
    power_charge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_FLOW)
    ]
    power_charge = cut_below_micro(power_charge, dict_asset[LABEL] + " charge flow")
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[INPUT_POWER],
        flow=power_charge.dropna(),
    )

    power_discharge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_FLOW)
    ]
    power_discharge = cut_below_micro(
        power_discharge, dict_asset[LABEL] + " discharge flow"
    )

    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[OUTPUT_POWER],
        flow=power_discharge.dropna(),
    )

    storage_capacity = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], TYPE_NONE), OEMOF_STORAGE_CONTENT)
    ]
    storage_capacity = cut_below_micro(
        storage_capacity, dict_asset[LABEL] + " " + STORAGE_CAPACITY
    )

    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[STORAGE_CAPACITY],
        flow=storage_capacity.dropna(),
        type=STORAGE_CAPACITY,
    )

    if OPTIMIZE_CAP in dict_asset:
        if dict_asset[OPTIMIZE_CAP][VALUE] is True:
            power_charge = storage_bus[OEMOF_SCALARS][
                ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_INVEST)
            ]
            dict_asset[INPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: power_charge,
                        UNIT: dict_asset[INPUT_POWER][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[INPUT_POWER][LABEL],
                power_charge,
            )

            power_discharge = storage_bus[OEMOF_SCALARS][
                ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_INVEST)
            ]
            dict_asset[OUTPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: power_discharge,
                        UNIT: dict_asset[OUTPUT_POWER][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[OUTPUT_POWER][LABEL],
                power_discharge,
            )

            storage_capacity = storage_bus[OEMOF_SCALARS][
                ((dict_asset[LABEL], TYPE_NONE), OEMOF_INVEST)
            ]
            dict_asset[STORAGE_CAPACITY].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: storage_capacity,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[STORAGE_CAPACITY][LABEL],
                storage_capacity,
            )

        else:
            dict_asset[INPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            dict_asset[OUTPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            dict_asset[STORAGE_CAPACITY].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )

    get_state_of_charge_info(dict_asset)


def get_state_of_charge_info(dict_asset):
    r"""
    Adds state of charge timeseries and average value of the timeseries to the storage dict.

    Parameters
    ----------
    dict_asset: dict
        Dict of the asset, specifically including the STORAGE_CAPACITY

    Returns
    -------
    Updated dict_asset

    Notes
    -----

    Tested with:
    - E1.test_get_state_of_charge_info()
    """
    timeseries_soc = dict_asset[STORAGE_CAPACITY][FLOW] / (
        dict_asset[STORAGE_CAPACITY][INSTALLED_CAP][VALUE]
        + dict_asset[STORAGE_CAPACITY][OPTIMIZED_ADD_CAP][VALUE]
    )
    dict_asset.update(
        {
            TIMESERIES_SOC: timeseries_soc,
            AVERAGE_SOC: {VALUE: timeseries_soc.mean(), UNIT: "factor"},
        }
    )
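

# Illustrative sketch (editor's addition): the SOC time series is the stored
# energy divided by the total capacity (installed plus optimized addition). The
# numbers below are made up.
def _example_get_state_of_charge_info():
    stored_energy = pd.Series([0.0, 7.5, 15.0])  # energy stored in each time step
    storage = {
        STORAGE_CAPACITY: {
            FLOW: stored_energy,
            INSTALLED_CAP: {VALUE: 10},
            OPTIMIZED_ADD_CAP: {VALUE: 5},
        }
    }
    get_state_of_charge_info(storage)
    # SOC = stored energy / (10 + 5)
    assert storage[TIMESERIES_SOC].tolist() == [0.0, 0.5, 1.0]
    assert storage[AVERAGE_SOC][VALUE] == 0.5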


def get_results(settings, bus_data, dict_asset, asset_group):
    r"""
    Reads results of the asset defined in `dict_asset` and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus_data : dict
        Contains information about all busses in a nested dict.
        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    dict_asset : dict
        Contains information about the asset.

    asset_group: str
        Asset group to which the evaluated asset belongs

    Returns
    -------
    Indirectly updates `dict_asset` with results.

    """
    # Get which parameter/bus needs to be evaluated
    parameter_to_be_evaluated = get_parameter_to_be_evaluated_from_oemof_results(
        asset_group, dict_asset[LABEL]
    )

    # Check if the parameter/bus is defined for dict_asset
    if parameter_to_be_evaluated not in dict_asset:
        logging.warning(
            f"The asset {dict_asset[LABEL]} of group {asset_group} should contain parameter {parameter_to_be_evaluated}, but it does not."
        )

    # Determine bus that needs to be evaluated
    bus_name = dict_asset[parameter_to_be_evaluated]

    # Determine flows of the asset, also if flows are connected to multiple busses
    if not isinstance(bus_name, list):
        flow_tuple = get_tuple_for_oemof_results(
            dict_asset[LABEL], asset_group, bus_name
        )

        investment_bus = dict_asset.get(INVESTMENT_BUS)
        if investment_bus is not None:
            bus_name = investment_bus
            logging.info(
                f"The asset {dict_asset[LABEL]} of group {asset_group} had 'investment_bus' set to '{investment_bus}'"
            )
            if investment_bus in dict_asset.get(INFLOW_DIRECTION, []):
                flow_tuple = (bus_name, dict_asset[LABEL])
            elif investment_bus in dict_asset.get(OUTFLOW_DIRECTION, []):
                flow_tuple = (dict_asset[LABEL], bus_name)

        # Get flow information
        get_flow(
            settings=settings,
            bus=bus_data[bus_name],
            dict_asset=dict_asset,
            flow_tuple=flow_tuple,
        )
        # Get capacity information
        get_optimal_cap(bus_data[bus_name], dict_asset, flow_tuple)

    else:
        # Asset is connected to multiple busses, evaluate all
        for bus_instance in bus_name:
            flow_tuple = get_tuple_for_oemof_results(
                dict_asset[LABEL], asset_group, bus_instance
            )
            get_flow(
                settings=settings,
                bus=bus_data[bus_instance],
                dict_asset=dict_asset,
                flow_tuple=flow_tuple,
                multi_bus=bus_instance,
            )
            # Get capacity information
            get_optimal_cap(bus_data[bus_instance], dict_asset, flow_tuple)

        # For assets with multiple output busses
        if parameter_to_be_evaluated == OUTFLOW_DIRECTION:
            cumulative_flow = 0
            for bus_instance in bus_name:
                cumulative_flow += dict_asset[FLOW][bus_instance]
            dict_asset[PEAK_FLOW][VALUE] = max(cumulative_flow)
            dict_asset[AVERAGE_FLOW][VALUE] = cumulative_flow.mean()
        elif parameter_to_be_evaluated == INFLOW_DIRECTION:
            logging.error(
                "The result processing of assets with multiple inputs might not be done correctly"
            )


def get_parameter_to_be_evaluated_from_oemof_results(asset_group, asset_label):
    r"""
    Determine the parameter that needs to be evaluated to determine an asset's optimized flow and capacity.

    Parameters
    ----------
    asset_group: str
        Asset group to which the evaluated asset belongs

    asset_label: str
        Label of the asset, needed for log message

    Returns
    -------
    parameter_to_be_evaluated: str
        Parameter that will be processed to get the dispatch and capacity of an asset

    Notes
    -----
    Tested by:
    - test_get_parameter_to_be_evaluated_from_oemof_results()
    """
    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        parameter_to_be_evaluated = INFLOW_DIRECTION

    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        parameter_to_be_evaluated = OUTFLOW_DIRECTION

    else:
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, which is not defined in E1.get_results()."
        )

    return parameter_to_be_evaluated


def get_tuple_for_oemof_results(asset_label, asset_group, bus):
    r"""
    Determines the tuple with which to access the oemof-solph results

    The order of the parameters in the tuple depends on the direction of the flow.
    If the asset is defined...
    a) ...by its influx from a bus, the bus has to be named first in the tuple
    b) ...by its outflux into a bus, the asset has to be named first in the tuple

    Parameters
    ----------
    asset_label: str
        Name of the asset

    asset_group: str
        Asset group the asset belongs to

    bus: str
        Bus that is to be accessed for the asset's information

    Returns
    -------
    flow_tuple: tuple of str
        Keys to be accessed in the oemof-solph results

    Notes
    -----
    Tested with
    - test_get_tuple_for_oemof_results()
    """
    # Determine which flux is evaluated for the flow
    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        flow_tuple = (bus, asset_label)
    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        flow_tuple = (asset_label, bus)
    else:
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, but it is not defined in E1.get_results() which flux is to be evaluated."
        )

    return flow_tuple
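

# Illustrative sketch (editor's addition): the tuple order mirrors the flow
# direction in the oemof results. The asset and bus names are hypothetical.
def _example_get_tuple_for_oemof_results():
    # A demand (consumption) is fed *from* the bus, so the bus comes first
    assert get_tuple_for_oemof_results(
        "demand_01", ENERGY_CONSUMPTION, "Electricity bus"
    ) == ("Electricity bus", "demand_01")
    # A production asset feeds *into* the bus, so the asset comes first
    assert get_tuple_for_oemof_results(
        "pv_plant", ENERGY_PRODUCTION, "Electricity bus"
    ) == ("pv_plant", "Electricity bus")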


def get_optimal_cap(bus, dict_asset, flow_tuple):
    r"""
    Retrieves optimized capacity of asset specified in `dict_asset`.

    Parameters
    ----------
    bus : dict
        Contains information about the busses linked to the asset specified in
        `dict_asset`. Information about the scalars like investment or initial
        capacity in key 'scalars' (pd.Series) and the flows between the
        component and the busses in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Key of the oemof-solph outputs dict mapping the value to be evaluated

    Returns
    -------
    Indirectly updated `dict_asset` with optimal capacity to be added
    ('optimizedAddCap').

    TODOS
    ^^^^^
    * direction as optional parameter or with default value None (direction is
        not needed if 'optimizeCap' is not in `dict_asset` or if its value is False)

    """
    if OPTIMIZE_CAP in dict_asset:

        if (
            dict_asset[OPTIMIZE_CAP][VALUE] is True
            and (flow_tuple, OEMOF_INVEST) in bus[OEMOF_SCALARS]
        ):
            optimal_capacity = bus[OEMOF_SCALARS][(flow_tuple, OEMOF_INVEST)]
            optimal_capacity = cut_below_micro(optimal_capacity, dict_asset[LABEL])
            if TIMESERIES_PEAK in dict_asset:
                if dict_asset[TIMESERIES_PEAK][VALUE] > 0:
                    dict_asset.update(
                        {
                            OPTIMIZED_ADD_CAP: {
                                VALUE: optimal_capacity
                                / dict_asset[TIMESERIES_PEAK][VALUE],
                                UNIT: dict_asset[UNIT],
                            }
                        }
                    )
                else:
                    logging.warning(
                        "Time series peak of asset %s negative or zero! Check timeseries. "
                        "No optimized capacity derived.",
                        dict_asset[LABEL],
                    )
                    pass
            else:
                dict_asset.update(
                    {
                        OPTIMIZED_ADD_CAP: {
                            VALUE: optimal_capacity,
                            UNIT: dict_asset[UNIT],
                        }
                    }
                )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[LABEL],
                optimal_capacity,
            )
        else:
            # only set a default optimized add cap value if the key does not exist already
            # this prevents erasing the value in case of multiple in/output busses
            if OPTIMIZED_ADD_CAP not in dict_asset:
                dict_asset.update(
                    {OPTIMIZED_ADD_CAP: {VALUE: 0, UNIT: dict_asset[UNIT]}}
                )
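

# Illustrative sketch (editor's addition): when an asset was defined through a
# peak-normalized time series, the invested capacity returned by oemof refers to
# the normalized profile and is therefore scaled back with TIMESERIES_PEAK. The
# bus dict below mimics the oemof 'scalars' entry with invented numbers.
def _example_get_optimal_cap():
    flow_tuple = ("pv_plant", "Electricity bus")
    bus = {OEMOF_SCALARS: {(flow_tuple, OEMOF_INVEST): 42.0}}
    dict_asset = {
        LABEL: "pv_plant",
        UNIT: "kWp",
        OPTIMIZE_CAP: {VALUE: True},
        TIMESERIES_PEAK: {VALUE: 0.5},
    }
    get_optimal_cap(bus, dict_asset, flow_tuple)
    # 42 units of invested "normalized" capacity correspond to 42 / 0.5 = 84 kWp
    assert dict_asset[OPTIMIZED_ADD_CAP][VALUE] == 84.0
    assert dict_asset[OPTIMIZED_ADD_CAP][UNIT] == "kWp"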


def get_flow(settings, bus, dict_asset, flow_tuple, multi_bus=None):
    r"""
    Adds flow of `bus` and total flow amongst other information to `dict_asset`.

    Depending on `flow_tuple`, the input or the output flow is used.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus : dict
        Contains information about a specific bus. Information about the scalars, if they exist,
        like investment or initial capacity in key 'scalars' (pd.Series) and the
        flows between the component and the bus(ses) in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Entry of the oemof-solph outputs to be evaluated

    multi_bus: str or None
        The name of the current bus (for assets connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the flow of `bus`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow').

    """

    if dict_asset.get(TYPE_ASSET) == "reducable_demand":
        flow_tuple = (flow_tuple[0], reducable_demand_name(dict_asset[LABEL]))
        flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)]
        flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW)
        flow_tuple = (
            flow_tuple[0],
            reducable_demand_name(dict_asset[LABEL], critical=True),
        )

        flow_crit = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)]
        flow_crit = cut_below_micro(flow_crit, dict_asset[LABEL] + FLOW)
        flow = flow + flow_crit

    else:
        flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)]
        flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW)

    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset,
        flow=flow.dropna(),
        bus_name=multi_bus,
    )
    if multi_bus is None:
        total_flow = dict_asset[TOTAL_FLOW][VALUE]
        bus_info = ""
    else:
        total_flow = dict_asset[TOTAL_FLOW][multi_bus]
        bus_info = f" for bus '{multi_bus}'"

    logging.debug(
        "Accessed simulated timeseries of asset %s (total sum: %s)%s",
        dict_asset[LABEL],
        round(total_flow),
        bus_info,
    )


def add_info_flows(evaluated_period, dict_asset, flow, type=None, bus_name=None):
    r"""
    Adds `flow` and total flow amongst other information to `dict_asset`.

    Parameters
    ----------
    evaluated_period : int
        The number of days simulated with the energy system model.
    dict_asset : dict
        Contains information about the asset `flow` belongs to.
    flow : pd.Series
        Time series of the flow.
    type: str, default: None
        Type of the flow, the only exception is "STORAGE_CAPACITY".
    bus_name: str or None
        The name of the current bus (for assets connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the `flow`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow'). As storage capacity is not a flow, an aggregation of the timeseries does not make sense
    and the parameters TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW are added but set to None.

    Notes
    -----

    Tested with:
    - E1.test_add_info_flows_365_days()
    - E1.test_add_info_flows_1_day()
    - E1.test_add_info_flows_storage_capacity()
    """
    total_flow = sum(flow)
    if bus_name is None:
        dict_asset.update({FLOW: flow})
    else:
        if FLOW not in dict_asset:
            dict_asset.update({FLOW: {bus_name: flow}})
        else:
            dict_asset[FLOW][bus_name] = flow

    if type == STORAGE_CAPACITY:
        # The oemof-solph "flow" connected to the storage capacity describes the energy stored in the storage asset, not the actual flow. As such, the below parameters are non-sensical, especially TOTAL_FLOW and ANNUAL_TOTAL_FLOW. PEAK_FLOW and AVERAGE_FLOW are, as a consequence, also not captured. Instead, the AVERAGE_SOC is calculated in a later processing step.
        for parameter in [TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW]:
            dict_asset.update({parameter: {VALUE: None, UNIT: "NaN"}})

    else:
        if bus_name is None:
            dict_asset.update(
                {
                    TOTAL_FLOW: {VALUE: total_flow, UNIT: "kWh"},
                    ANNUAL_TOTAL_FLOW: {
                        VALUE: total_flow * 365 / evaluated_period,
                        UNIT: "kWh",
                    },
                    PEAK_FLOW: {VALUE: max(flow), UNIT: "kW"},
                    AVERAGE_FLOW: {VALUE: flow.mean(), UNIT: "kW"},
                }
            )

        else:
            if TOTAL_FLOW not in dict_asset:
                dict_asset.update(
                    {TOTAL_FLOW: {bus_name: total_flow, VALUE: total_flow, UNIT: "kWh"}}
                )
            else:
                dict_asset[TOTAL_FLOW][bus_name] = total_flow
                dict_asset[TOTAL_FLOW][VALUE] += total_flow

            if ANNUAL_TOTAL_FLOW not in dict_asset:
                dict_asset.update(
                    {
                        ANNUAL_TOTAL_FLOW: {
                            bus_name: total_flow * 365 / evaluated_period,
                            VALUE: total_flow * 365 / evaluated_period,
                            UNIT: "kWh",
                        }
                    }
                )
            else:
                dict_asset[ANNUAL_TOTAL_FLOW][bus_name] = (
                    total_flow * 365 / evaluated_period
                )
                dict_asset[ANNUAL_TOTAL_FLOW][VALUE] += (
                    total_flow * 365 / evaluated_period
                )

            if PEAK_FLOW not in dict_asset:
                dict_asset.update({PEAK_FLOW: {bus_name: max(flow), UNIT: "kW"}})
            else:
                dict_asset[PEAK_FLOW][bus_name] = max(flow)

            if AVERAGE_FLOW not in dict_asset:
                dict_asset.update({AVERAGE_FLOW: {bus_name: flow.mean(), UNIT: "kW"}})
            else:
                dict_asset[AVERAGE_FLOW][bus_name] = flow.mean()
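

# Illustrative sketch (editor's addition): aggregation of a one-day flow with
# made-up numbers. The annual total extrapolates the simulated period to 365
# days, here with `evaluated_period=1`.
def _example_add_info_flows():
    demo_asset = {}
    hourly_flow = pd.Series([1.0, 2.0, 1.0])  # kW over three time steps
    add_info_flows(evaluated_period=1, dict_asset=demo_asset, flow=hourly_flow)
    assert demo_asset[TOTAL_FLOW][VALUE] == 4.0  # kWh
    assert demo_asset[ANNUAL_TOTAL_FLOW][VALUE] == 4.0 * 365  # extrapolated kWh
    assert demo_asset[PEAK_FLOW][VALUE] == 2.0  # kW
    assert demo_asset[AVERAGE_FLOW][VALUE] == 4.0 / 3  # kW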


def convert_demand_to_dataframe(dict_values, sector_demands=None):
    """Dataframe used for the demands table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    sector_demands: str
        Name of the sector of the energy system whose demands must be returned as a df by this function
        Default: None

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """

    # Make a dict which is a sub-dict of the JSON results file with only the consumption components of the energy system
    demands = copy.deepcopy(dict_values[ENERGY_CONSUMPTION])

    # The following block removes all the non-current sectoral demands from the demands dict
    if sector_demands is not None:
        non_sec_demands = []
        # Loop through the demands checking if there are keys which do not belong to the current sector
        for demand_key in demands.keys():
            if demands[demand_key][ENERGY_VECTOR] != (sector_demands.title()):
                non_sec_demands.append(demand_key)
        # Drop the non-sectoral demands from the demands dict
        for demand_to_drop in non_sec_demands:
            del demands[demand_to_drop]

    # Removing all the keys that do not represent actual demands
    drop_list = []

    # Loop through the demands identifying irrelevant demands
    for column_label in demands:
        # Identifies excess sink in demands for removal
        if EXCESS_SINK in column_label:
            drop_list.append(column_label)
        # Identifies DSO_feedin sink in demands for removal
        elif DSO_FEEDIN in column_label:
            drop_list.append(column_label)

    # Remove the elements collected in drop_list (i.e. sinks that are not demands) from the demands dict
    for item in drop_list:
        del demands[item]

    # Create empty dict to hold the current-sector demands' data
    demand_data = {}

    # Populate the above dict with data for each of the demands in the current sector
    for dem in list(demands.keys()):
        demand_data.update(
            {
                dem: [
                    demands[dem][UNIT],
                    demands[dem][ENERGY_VECTOR],
                    demands[dem][TIMESERIES_PEAK][VALUE],
                    demands[dem][TIMESERIES_AVERAGE][VALUE],
                    demands[dem][TIMESERIES_TOTAL][VALUE],
                ]
            }
        )
    # Creating a dataframe with all of the demands from the above dict
    df_dem = pd.DataFrame.from_dict(
        demand_data,
        orient="index",
        columns=[
            UNIT,
            "Type of Demand",
            "Peak Demand",
            "Mean Demand",
            "Total Annual Demand",
        ],
    )

    # Operations on the index of the dataframe created above
    df_dem.index.name = "Demands"
    df_dem = df_dem.reset_index()
    df_dem = df_dem.round(2)

    return df_dem
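

# Illustrative sketch (editor's addition): a minimal, invented `dict_values`
# holding a single electricity demand, to show the shape of the demands table.
def _example_convert_demand_to_dataframe():
    dict_values = {
        ENERGY_CONSUMPTION: {
            "demand_01": {
                UNIT: "kW",
                ENERGY_VECTOR: "Electricity",
                TIMESERIES_PEAK: {VALUE: 4.0},
                TIMESERIES_AVERAGE: {VALUE: 1.5},
                TIMESERIES_TOTAL: {VALUE: 13140.0},
            }
        }
    }
    df_dem = convert_demand_to_dataframe(dict_values)
    assert list(df_dem["Demands"]) == ["demand_01"]
    assert list(df_dem["Peak Demand"]) == [4.0]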


def convert_components_to_dataframe(dict_values):
    """Dataframe used for the component table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    Notes
    -----

    Tested with:
        - test_E1_process_results.test_convert_components_to_dataframe()
    """

    # Read the subdicts energyProduction, energyConversion and energyStorage as separate dicts
    dict_energy_production = dict_values[ENERGY_PRODUCTION]
    dict_energy_conversion = dict_values[ENERGY_CONVERSION]
    dict_energy_storage = dict_values[ENERGY_STORAGE]

    # Read the keys of the above dicts into separate lists
    keys_production = list(dict_energy_production.keys())
    keys_conversion = list(dict_energy_conversion.keys())
    keys_storage = list(dict_energy_storage.keys())

    # Dict to hold the data for creating a pandas dataframe
    components = {}

    # Energy production and conversion have the same tree structure, can be processed together:

    # Add the above dictionaries and lists of keys into new lists for iterating through, later
    comp_dict_list = [dict_energy_production, dict_energy_conversion]
    components_list = [keys_production, keys_conversion]

    # Defining the columns of the table and filling them up with the appropriate data
    for component_key, comp_dict in zip(components_list, comp_dict_list):
        for comps in component_key:
            # Define whether optimization takes place
            optimize = translate_optimizeCap_from_boolean_to_yes_no(
                comp_dict[comps][OPTIMIZE_CAP][VALUE]
            )
            components.update(
                {
                    comps: [
                        comp_dict[comps][OEMOF_ASSET_TYPE],
                        comp_dict[comps][ENERGY_VECTOR],
                        comp_dict[comps][UNIT],
                        comp_dict[comps][INSTALLED_CAP][VALUE],
                        optimize,
                    ]
                }
            )

    # Energy storage assets have different structure, added individually
    for storage_component in keys_storage:
        for sub_stor_comp in [INPUT_POWER, STORAGE_CAPACITY, OUTPUT_POWER]:
            comp_label = dict_energy_storage[storage_component][sub_stor_comp][LABEL]
            # Define whether optimization takes place
            # Currently, storage optimization setting applies to all sub-categories.
            # Can be re-used when storage asset sub-components can be optimized individually:
            # dict_energy_storage[storage_component][sub_stor_comp][OPTIMIZE_CAP][VALUE]
            optimize = translate_optimizeCap_from_boolean_to_yes_no(
                dict_energy_storage[storage_component][OPTIMIZE_CAP][VALUE]
            )
            components.update(
                {
                    comp_label: [
                        dict_energy_storage[storage_component][OEMOF_ASSET_TYPE],
                        dict_energy_storage[storage_component][ENERGY_VECTOR],
                        dict_energy_storage[storage_component][sub_stor_comp][
                            INSTALLED_CAP
                        ][UNIT],
                        dict_energy_storage[storage_component][sub_stor_comp][
                            INSTALLED_CAP
                        ][VALUE],
                        optimize,
                    ]
                }
            )

    # Create a pandas dataframe from the dictionary created above
    df_comp = pd.DataFrame.from_dict(
        components,
        orient="index",
        columns=[
            "Type of Component",
            "Energy Vector",
            UNIT,
            "Installed Capacity",
            "Capacity optimization",
        ],
    )
    df_comp.index.name = "Component"
    df_comp = df_comp.reset_index()
    return df_comp
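

# Illustrative sketch (editor's addition): a minimal, invented `dict_values`
# with one production asset and no conversion or storage assets, to show the
# shape of the component table.
def _example_convert_components_to_dataframe():
    dict_values = {
        ENERGY_PRODUCTION: {
            "pv_plant": {
                OEMOF_ASSET_TYPE: "source",
                ENERGY_VECTOR: "Electricity",
                UNIT: "kWp",
                INSTALLED_CAP: {VALUE: 50},
                OPTIMIZE_CAP: {VALUE: True},
            }
        },
        ENERGY_CONVERSION: {},
        ENERGY_STORAGE: {},
    }
    df_comp = convert_components_to_dataframe(dict_values)
    assert list(df_comp["Component"]) == ["pv_plant"]
    assert list(df_comp["Capacity optimization"]) == ["Yes"]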


def translate_optimizeCap_from_boolean_to_yes_no(optimize_cap):
    r"""
    Translates the boolean OPTIMIZE_CAP to a yes-no value for readability of the auto report

    Parameters
    ----------
    optimize_cap: bool
        Setting whether asset is optimized or not

    Returns
    -------
    optimize: str
        If OPTIMIZE_CAP==True: "Yes", else "No".

    Notes
    -----
    Tested with:
    - test_E1_process_results.test_translate_optimizeCap_from_boolean_to_yes_no()
    """
    if optimize_cap is True:
        optimize = "Yes"
    else:
        optimize = "No"
    return optimize


def convert_scalar_matrix_to_dataframe(dict_values):
    """Dataframe used for the scalar matrix table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """

    # Read in the scalar matrix as pandas dataframe
    df_scalar_matrix = dict_values[KPI][KPI_SCALAR_MATRIX]

    # Changing the index to a sequence of 0,1,2...
    df_scalar_matrix = df_scalar_matrix.reset_index()

    # Dropping irrelevant columns from the dataframe
    df_scalar_matrix = df_scalar_matrix.drop(
        ["index", TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW], axis=1
    )

    # Renaming the columns
    df_scalar_matrix = df_scalar_matrix.rename(
        columns={
            LABEL: "Component/Parameter",
            OPTIMIZED_ADD_CAP: "CAP",
            ANNUAL_TOTAL_FLOW: "Aggregated Flow",
        }
    )
    # Rounding the numeric values to two decimal places
    df_scalar_matrix = df_scalar_matrix.round(2)

    return df_scalar_matrix


def convert_cost_matrix_to_dataframe(dict_values):
    """Dataframe used for the cost matrix table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """

    # Read in the cost matrix as a pandas dataframe
    df_cost_matrix = dict_values[KPI][KPI_COST_MATRIX]

    # Changing the index to a sequence of 0,1,2...
    df_cost_matrix = df_cost_matrix.reset_index()

    # Drop some irrelevant columns from the dataframe
    df_cost_matrix = df_cost_matrix.drop(
        ["index", COST_OPERATIONAL_TOTAL, COST_INVESTMENT, COST_DISPATCH, COST_OM],
        axis=1,
    )

    # Rename some of the column names
    df_cost_matrix = df_cost_matrix.rename(
        columns={
            LABEL: "Component",
            COST_TOTAL: "Total costs",
            COST_UPFRONT: "Upfront Investment Costs",
        }
    )

    # Round the numeric values to two decimal places
    df_cost_matrix = df_cost_matrix.round(2)
    return df_cost_matrix


def convert_costs_to_dataframe(dict_values):
    """Dataframe used for the costs piecharts of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Get the cost matrix from the results JSON file into a pandas DF
    df_pie_plot = dict_values[KPI][KPI_COST_MATRIX]

    # List of the needed parameters
    costs_needed = [LABEL, ANNUITY_TOTAL, COST_INVESTMENT, COST_OPERATIONAL_TOTAL]

    # Drop all the irrelevant columns
    df_pie_plot = df_pie_plot[costs_needed]

    # Add a row with the total of each column, except label
    df_pie_plot = pd.concat([df_pie_plot, df_pie_plot.sum().to_frame().T])

    # Add a label for the row holding the sum of each column
    df_pie_plot.iloc[-1, 0] = "Total"

    return df_pie_plot


def convert_scalars_to_dataframe(dict_values):
    """
    Processes the scalar system-wide KPI so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_scalars_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_SCALARS_DICT does not hold any units, the table printed in the report is unit-less.
    """

    units_cost_kpi = get_units_of_cost_matrix_entries(
        dict_values[ECONOMIC_DATA], dict_values[KPI][KPI_SCALARS_DICT]
    )

    kpi_scalars_dataframe = pd.DataFrame(
        dict_values[KPI][KPI_SCALARS_DICT], index=[VALUE]
    )
    kpi_names = kpi_scalars_dataframe.columns
    kpi_scalars_dataframe = kpi_scalars_dataframe.transpose()
    kpi_scalars_dataframe[KPI] = kpi_names
    kpi_scalars_dataframe[UNIT] = units_cost_kpi
    kpi_scalars_dataframe = kpi_scalars_dataframe[[KPI, UNIT, VALUE]]

    return kpi_scalars_dataframe


def convert_kpi_sector_to_dataframe(dict_values):
    """
    Processes the sector KPIs so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_sectors_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_UNCOUPLED_DICT does not hold any units, the table printed in the report is unit-less.
    """

    if isinstance(dict_values[KPI][KPI_UNCOUPLED_DICT], dict):
        kpi_sectors_dataframe = pd.DataFrame.from_dict(
            dict_values[KPI][KPI_UNCOUPLED_DICT], orient="index"
        )
    else:
        kpi_sectors_dataframe = dict_values[KPI][KPI_UNCOUPLED_DICT]
    # Formats the kpi_sectors dataframe for nicer display
    cols = list(kpi_sectors_dataframe.columns)
    kpi_sectors_dataframe["KPI"] = kpi_sectors_dataframe.index
    kpi_sectors_dataframe = kpi_sectors_dataframe[["KPI"] + cols]

    return kpi_sectors_dataframe


def get_units_of_cost_matrix_entries(dict_economic, kpi_list):
    """
    Determines the units of the cost KPIs to be stored to :class: DataFrame.

    Parameters
    ----------
    dict_economic:
        Economic project data

    kpi_list:
        List of cost matrix entries

    Returns
    -------
    unit_list: list
        List of units for the :class: DataFrame to be created
    """

    unit_list = []
    kpi_cost_unit_dict = {
        LABEL: None,
        UNIT: None,
        COST_TOTAL: dict_economic[CURR],
        COST_OPERATIONAL_TOTAL: dict_economic[CURR],
        COST_INVESTMENT: dict_economic[CURR],
        COST_UPFRONT: dict_economic[CURR],
        COST_DISPATCH: dict_economic[CURR],
        COST_OM: dict_economic[CURR],
        ANNUITY_TOTAL: dict_economic[CURR] + "/" + UNIT_YEAR,
        ANNUITY_OM: dict_economic[CURR] + "/" + UNIT_YEAR,
        LCOE_ASSET: dict_economic[CURR] + "/" + "energy carrier unit",
    }
    for key in kpi_list:
        if key not in kpi_cost_unit_dict:
            unit_list.append("NA")
        else:
            unit_list.append(kpi_cost_unit_dict[key])
    return unit_list
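

# Illustrative sketch (editor's addition): cost-related KPIs are reported in the
# project currency (per year for annuities), anything else gets "NA". The
# economic data below is invented.
def _example_get_units_of_cost_matrix_entries():
    dict_economic = {CURR: "EUR"}
    kpi_list = [LABEL, COST_TOTAL, ANNUITY_TOTAL, "some_other_kpi"]
    units = get_units_of_cost_matrix_entries(dict_economic, kpi_list)
    assert units[0] is None  # labels carry no unit
    assert units[1] == "EUR"
    assert units[2] == "EUR" + "/" + UNIT_YEAR
    assert units[3] == "NA"  # KPI not in the cost unit mapping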