• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

rl-institut / multi-vector-simulator / 4085789062

pending completion
4085789062

push

github

pierre-francois.duc
Lint with black

1 of 1 new or added line in 1 file covered. (100.0%)

5941 of 7819 relevant lines covered (75.98%)

0.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

65.86
/src/multi_vector_simulator/E1_process_results.py
1
"""
2
Module E1 process results
3
=========================
4

5
Module E1 processes the oemof results.
6
- receive time series per bus for all assets
7
- write time series to dictionary
8
- get optimal capacity of optimized assets
9
- add the evaluation of time series
10

11
"""
12
import logging
1✔
13
import copy
1✔
14
import pandas as pd
1✔
15

16
from multi_vector_simulator.utils.constants import TYPE_NONE, TOTAL_FLOW
1✔
17
from multi_vector_simulator.utils.constants_json_strings import (
1✔
18
    ECONOMIC_DATA,
19
    FLOW,
20
    INSTALLED_CAP,
21
    INPUT_POWER,
22
    OUTPUT_POWER,
23
    STORAGE_CAPACITY,
24
    TIME_INDEX,
25
    INFLOW_DIRECTION,
26
    OUTFLOW_DIRECTION,
27
    KPI_SCALARS_DICT,
28
    OPTIMIZED_FLOWS,
29
    UNIT,
30
    CURR,
31
    UNIT_YEAR,
32
    ENERGY_CONSUMPTION,
33
    LABEL,
34
    VALUE,
35
    OPTIMIZE_CAP,
36
    SIMULATION_SETTINGS,
37
    EVALUATED_PERIOD,
38
    TIMESERIES_PEAK,
39
    TIMESERIES_TOTAL,
40
    TIMESERIES_AVERAGE,
41
    DSO_FEEDIN,
42
    EXCESS_SINK,
43
    ENERGY_CONVERSION,
44
    ENERGY_PRODUCTION,
45
    ENERGY_STORAGE,
46
    OEMOF_ASSET_TYPE,
47
    INVESTMENT_BUS,
48
    ENERGY_VECTOR,
49
    KPI,
50
    KPI_COST_MATRIX,
51
    KPI_SCALAR_MATRIX,
52
    TOTAL_FLOW,
53
    PEAK_FLOW,
54
    AVERAGE_FLOW,
55
    OPTIMIZED_ADD_CAP,
56
    ANNUAL_TOTAL_FLOW,
57
    COST_OPERATIONAL_TOTAL,
58
    COST_INVESTMENT,
59
    COST_DISPATCH,
60
    COST_OM,
61
    COST_TOTAL,
62
    COST_UPFRONT,
63
    ANNUITY_TOTAL,
64
    ANNUITY_OM,
65
    LCOE_ASSET,
66
    EMISSION_FACTOR,
67
    TOTAL_EMISSIONS,
68
    TIMESERIES_SOC,
69
    KPI_UNCOUPLED_DICT,
70
    FIX_COST,
71
    LIFETIME_PRICE_DISPATCH,
72
    AVERAGE_SOC,
73
)
74

75
# Oemof.solph variables
# Keys used to access the nested oemof-solph results dictionaries
# (bus_data[bus]["sequences"] / ["scalars"]).
OEMOF_FLOW = "flow"
OEMOF_SEQUENCES = "sequences"
OEMOF_INVEST = "invest"
OEMOF_SCALARS = "scalars"
OEMOF_STORAGE_CONTENT = "storage_content"

# Determines which assets are defined by...
# a influx from a bus
ASSET_GROUPS_DEFINED_BY_INFLUX = [ENERGY_CONSUMPTION]
# b outflux into a bus
ASSET_GROUPS_DEFINED_BY_OUTFLUX = [ENERGY_CONVERSION, ENERGY_PRODUCTION]

# Threshold for precision limit: decision variables whose magnitude is below
# this value are considered solver noise and are rounded to 0 by
# `cut_below_micro`.
THRESHOLD = 10 ** (-6)
90

91

92
def cut_below_micro(value, label):
    r"""
    Trims results of the oemof optimization to positive values, rounding values
    within the precision threshold (of -10^-6) to 0.

    Oemof termination is dependent on the simulation settings of oemof solph. Thus, it can
    terminate the optimization if the results are within certain bounds, which can sometimes
    lead to negative decision variables (capacities, flows). Negative values do not make sense
    in this context. If the values are between -10^-6 and 0, we assume that they can be rounded
    to 0, as they result from the precision settings of the solver. In that case the value is
    overwritten for the further post-processing. This should also avoid SOC timeseries with
    doubtful values outside of [0,1]. If any value is a higher negative value than the
    threshold, its value is not changed but a warning raised.
    Similarly, if a positive decision variable is detected that has a value lower than the
    threshold, it is assumed that this only happens because of the solver settings, and the
    values below the threshold are rounded to 0.

    Parameters
    ----------
    value: float or pd.Series
        Decision variable determined by oemof

    label: str
        String to be mentioned in the debug messages

    Returns
    -------
    value: float or pd.Series
        Decision variable with rounded values in case that slight negative values or
        positive values below the threshold were observed.

    Notes
    -----

    Tested with:
    - E1.test_cut_below_micro_scalar_value_below_0_larger_threshold
    - E1.test_cut_below_micro_scalar_value_below_0_smaller_threshold
    - E1.test_cut_below_micro_scalar_value_0
    - E1.test_cut_below_micro_scalar_value_larger_0
    - E1.test_cut_below_micro_scalar_value_larger_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_larger_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_0
    - E1.test_cut_below_micro_pd_Series_larger_0
    - E1.test_cut_below_micro_pd_Series_larger_0_smaller_threshold
    """
    text_block_start = f"The value of {label} is below 0"
    text_block_set_0 = f"Negative value (s) are smaller than {-THRESHOLD}. This is likely a result of the termination/precision settings of the cbc solver. As the difference is marginal, the value will be set to 0. "
    text_block_oemof = "This is so far below 0, that the value is not changed. All oemof decision variables should be positive so this needs to be investigated. "

    logging.debug(
        f"Check if the dispatch of asset {label} as per the oemof results is within the defined margin of precision ({THRESHOLD})"
    )

    # flows
    if isinstance(value, pd.Series):
        # Identifies any negative values. Decision variables should not have a negative value
        if (value < 0).any():
            log_msg = text_block_start
            # Counts the incidents, in which the value is below 0.
            # (The redundant inner isinstance check of the original was removed:
            # this branch is only reached for pd.Series.)
            instances = sum(value < 0)
            log_msg += f" in {instances} instances. "
            # Checks that all values are at least within the threshold for negative values.
            if (value > -THRESHOLD).all():
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = value.clip(lower=0)
            # If any value has a large negative value (lower than threshold), no values are changed.
            else:
                test = value.clip(upper=-THRESHOLD).abs()
                log_msg += f"At least one value exceeds the scale of {-THRESHOLD}. The highest negative value is -{max(test)}. "
                log_msg += text_block_oemof
                logging.warning(log_msg)

        # Determine if there are any positive values that are between 0 and the threshold:
        # Clip to interval
        positive_threshold = value.clip(lower=0, upper=THRESHOLD)
        # Determine instances in which bounds are met: 1=either 0 or larger threshold, 0=smaller threshold
        positive_threshold = (positive_threshold == 0) + (
            positive_threshold == THRESHOLD
        )
        # Instances in which values are in determined interval:
        instances = len(value) - sum(positive_threshold)
        if instances > 0:
            logging.debug(
                f"There are {instances} instances in which there are positive values smaller then the threshold."
            )
            # Multiply with positive_threshold (1=either 0 or larger threshold, 0=smaller threshold)
            value = value * positive_threshold

    # capacities
    else:
        # Value is lower 0, which should not be possible for decision variables
        if value < 0:
            log_msg = text_block_start
            # Value in (-10**(-6), 0), ie. so small that it can be neglected.
            if value > -THRESHOLD:
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = 0
            # Value is below 0 but already large enough that it should not be neglected.
            else:
                log_msg += f"The value exceeds the scale of {-THRESHOLD}, with {value}."
                log_msg += text_block_oemof
                logging.warning(log_msg)
        # Value is above 0 but below threshold, should be rounded
        elif value < THRESHOLD:
            logging.debug(
                f"The positive value {value} is below the {THRESHOLD}, and rounded to 0."
            )
            value = 0

    return value
196

197

198
def get_timeseries_per_bus(dict_values, bus_data):
    r"""
    Reads simulation results of all busses and stores time series.

    Parameters
    ----------
    dict_values : dict
        Contains all input data of the simulation.
    bus_data : dict Contains information about all busses in a nested dict.

        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    Notes
    -----
    Tested with:
    - test_get_timeseries_per_bus_two_timeseries_for_directly_connected_storage()

    #Todo: This is a duplicate of the `E1.get_flow()` assertions, and thus `E1.cut_below_micro` is applied twice for each flow. This should rather be merged into the other functions.

    Returns
    -------
    Indirectly updated `dict_values` with 'optimizedFlows' - one data frame for each bus.

    """
    logging.debug(
        "Time series for plots and 'timeseries.xlsx' are added to `dict_values[OPTIMIZED_FLOWS]` in `E1.get_timeseries_per_bus`; check there in case of problems."
    )
    flows_per_bus = {}
    time_index = dict_values[SIMULATION_SETTINGS][TIME_INDEX]
    for bus_label, bus_results in bus_data.items():
        bus_df = pd.DataFrame(index=time_index)
        flows_per_bus[bus_label] = bus_df
        sequences = bus_results[OEMOF_SEQUENCES]

        # Sort the oemof result keys by flow direction relative to this bus.
        inflow_keys = {}
        outflow_keys = {}
        for result_key in sequences.keys():
            if result_key[1] != OEMOF_FLOW:
                continue
            source, target = result_key[0]
            if target == bus_label:
                # flow into the bus, indexed by the providing asset
                inflow_keys[source] = result_key
            if source == bus_label:
                # flow out of the bus, indexed by the receiving asset
                outflow_keys[target] = result_key

        # Store trimmed inflow time series under the asset name.
        for asset_label, result_key in inflow_keys.items():
            asset_flow = cut_below_micro(
                sequences[result_key], bus_label + "/" + asset_label
            )
            bus_df[asset_label] = asset_flow

        # Store outflows with a negative sign.
        for asset_label, result_key in outflow_keys.items():
            if asset_label in bus_df.columns:
                # `asset_label` already exists: this is a storage directly
                # connected to the bus. Rename the existing (discharge) column
                # and add the charging flow as a separate column.
                bus_df.rename(
                    columns={asset_label: " ".join([asset_label, OUTPUT_POWER])},
                    inplace=True,
                )
                bus_df[" ".join([asset_label, INPUT_POWER])] = -sequences[result_key]
            else:
                # Not previously added, ie. not a storage asset.
                bus_df[asset_label] = -sequences[result_key]

    dict_values.update({OPTIMIZED_FLOWS: flows_per_bus})
271

272

273
def get_storage_results(settings, storage_bus, dict_asset):
    r"""
    Reads storage results of simulation and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').
    storage_bus : dict
        Contains information about the storage bus. Information about the scalars
        like investment or initial capacity in key 'scalars' (pd.Series) and the
        flows between the component and the busses in key 'sequences' (pd.DataFrame).
    dict_asset : dict
        Contains information about the storage like capacity, charging power, etc.

    Returns
    -------
    Indirectly updates `dict_asset` with simulation results concerning the
    storage.

    """
    # Charging flow (inflow bus -> storage), trimmed of solver precision noise.
    power_charge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_FLOW)
    ]
    power_charge = cut_below_micro(power_charge, dict_asset[LABEL] + " charge flow")
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[INPUT_POWER],
        flow=power_charge,
    )

    # Discharging flow (storage -> outflow bus).
    power_discharge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_FLOW)
    ]
    power_discharge = cut_below_micro(
        power_discharge, dict_asset[LABEL] + " discharge flow"
    )

    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[OUTPUT_POWER],
        flow=power_discharge,
    )

    # Stored energy content over time; not an actual flow, hence the
    # `type=STORAGE_CAPACITY` marker passed to `add_info_flows` below.
    storage_capacity = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], TYPE_NONE), OEMOF_STORAGE_CONTENT)
    ]
    storage_capacity = cut_below_micro(
        storage_capacity, dict_asset[LABEL] + " " + STORAGE_CAPACITY
    )

    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[STORAGE_CAPACITY],
        flow=storage_capacity,
        type=STORAGE_CAPACITY,
    )

    if OPTIMIZE_CAP in dict_asset:
        if dict_asset[OPTIMIZE_CAP][VALUE] is True:
            # Optimized additional charging power capacity.
            power_charge = storage_bus[OEMOF_SCALARS][
                ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_INVEST)
            ]
            dict_asset[INPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: power_charge,
                        UNIT: dict_asset[INPUT_POWER][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[INPUT_POWER][LABEL],
                power_charge,
            )

            # Optimized additional discharging power capacity.
            power_discharge = storage_bus[OEMOF_SCALARS][
                ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_INVEST)
            ]
            dict_asset[OUTPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: power_discharge,
                        UNIT: dict_asset[OUTPUT_POWER][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[OUTPUT_POWER][LABEL],
                power_discharge,
            )

            # Optimized additional storage (energy) capacity.
            storage_capacity = storage_bus[OEMOF_SCALARS][
                ((dict_asset[LABEL], TYPE_NONE), OEMOF_INVEST)
            ]
            dict_asset[STORAGE_CAPACITY].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: storage_capacity,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            logging.debug(
                "Accessed optimized capacity of asset %s: %s",
                dict_asset[STORAGE_CAPACITY][LABEL],
                storage_capacity,
            )

        else:
            # Capacity was not optimized: default the added capacities to 0.
            # NOTE(review): INPUT_POWER and OUTPUT_POWER reuse the
            # STORAGE_CAPACITY unit here, whereas the optimized branch above
            # uses each sub-asset's own unit — looks inconsistent; confirm
            # whether this is intended.
            dict_asset[INPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            dict_asset[OUTPUT_POWER].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )
            dict_asset[STORAGE_CAPACITY].update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: 0,
                        UNIT: dict_asset[STORAGE_CAPACITY][UNIT],
                    }
                }
            )

    # Derive the SOC timeseries and its average from the results stored above.
    get_state_of_charge_info(dict_asset)
413

414

415
def get_state_of_charge_info(dict_asset):
    r"""
    Adds state of charge timeseries and average value of the timeseries to the storage dict.

    Parameters
    ----------
    dict_asset: dict
        Dict of the asset, specifically including the STORAGE_CAPACITY

    Returns
    -------
    Updated dict_asset

    Notes
    -----

    Tested with:
    - E1.test_get_state_of_charge_info()
    """
    storage = dict_asset[STORAGE_CAPACITY]
    # Total usable capacity = pre-installed plus optimized addition.
    total_capacity = (
        storage[INSTALLED_CAP][VALUE] + storage[OPTIMIZED_ADD_CAP][VALUE]
    )
    # SOC = stored energy content relative to total capacity.
    soc_series = storage[FLOW] / total_capacity
    dict_asset[TIMESERIES_SOC] = soc_series
    dict_asset[AVERAGE_SOC] = {VALUE: soc_series.mean(), UNIT: "factor"}
444

445

446
def get_results(settings, bus_data, dict_asset, asset_group):
    r"""
    Reads results of the asset defined in `dict_asset` and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus_data : dict
        Contains information about all busses in a nested dict.
        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    dict_asset : dict
        Contains information about the asset.

    asset_group: str
       Asset group to which the evaluated asset belongs

    Returns
    -------
    Indirectly updates `dict_asset` with results.

    """
    # Get which parameter/bus needs to be evaluated
    parameter_to_be_evaluated = get_parameter_to_be_evaluated_from_oemof_results(
        asset_group, dict_asset[LABEL]
    )

    # Check if the parameter/bus is defined for dict_asset
    if parameter_to_be_evaluated not in dict_asset:
        logging.warning(
            f"The asset {dict_asset[LABEL]} of group {asset_group} should contain parameter {parameter_to_be_evaluated}, but it does not."
        )

    # Determine bus that needs to be evaluated
    bus_name = dict_asset[parameter_to_be_evaluated]

    # Determine flows of the asset, also if flows are connected to multiple busses
    if not isinstance(bus_name, list):
        flow_tuple = get_tuple_for_oemof_results(
            dict_asset[LABEL], asset_group, bus_name
        )

        # An explicitly defined investment bus overrules the default bus.
        investment_bus = dict_asset.get(INVESTMENT_BUS)
        if investment_bus is not None:
            bus_name = investment_bus
            logging.info(
                f"The asset {dict_asset[LABEL]} of group {asset_group} had 'investment_bus' set to '{investment_bus}'"
            )
            if investment_bus in dict_asset.get(INFLOW_DIRECTION, []):
                flow_tuple = (bus_name, dict_asset[LABEL])
            elif investment_bus in dict_asset.get(OUTFLOW_DIRECTION, []):
                flow_tuple = (dict_asset[LABEL], bus_name)

        # Get flow information
        get_flow(
            settings=settings,
            bus=bus_data[bus_name],
            dict_asset=dict_asset,
            flow_tuple=flow_tuple,
        )
        # Get capacity information
        get_optimal_cap(bus_data[bus_name], dict_asset, flow_tuple)

    else:
        # Asset is connected to multiple busses, evaluate all
        for bus_instance in bus_name:
            flow_tuple = get_tuple_for_oemof_results(
                dict_asset[LABEL], asset_group, bus_instance
            )
            get_flow(
                settings=settings,
                bus=bus_data[bus_instance],
                dict_asset=dict_asset,
                flow_tuple=flow_tuple,
                multi_bus=bus_instance,
            )
            # Get capacity information
            get_optimal_cap(bus_data[bus_instance], dict_asset, flow_tuple)

        # For assets with multiple output busses the aggregated KPI are
        # computed over the sum of all individual bus flows.
        if parameter_to_be_evaluated == OUTFLOW_DIRECTION:
            cumulative_flow = 0
            for bus_instance in bus_name:
                cumulative_flow += dict_asset[FLOW][bus_instance]
            dict_asset[PEAK_FLOW][VALUE] = max(cumulative_flow)
            # Bugfix: the mean was previously written to PEAK_FLOW as well,
            # overwriting the peak and leaving AVERAGE_FLOW untouched.
            dict_asset[AVERAGE_FLOW][VALUE] = cumulative_flow.mean()
        elif parameter_to_be_evaluated == INFLOW_DIRECTION:
            logging.error(
                "The result processing of asset with multiple inputs might not be done correctly"
            )
544

545

546
def get_parameter_to_be_evaluated_from_oemof_results(asset_group, asset_label):
    r"""
    Determine the parameter that needs to be evaluated to determine an asset`s optimized flow and capacity.

    Parameters
    ----------
    asset_group: str
        Asset group to which the evaluated asset belongs

    asset_label: str
        Label of the asset, needed for log message

    Returns
    -------
    parameter_to_be_evaluated: str or None
        Parameter that will be processed to get the dispatch and capacity of an asset.
        `None` if `asset_group` is not a known asset group (a warning is logged).

    Notes
    -----
    Tested by:
    - test_get_parameter_to_be_evaluated_from_oemof_results()
    """
    # Bugfix: default to None so an unknown asset group no longer raises an
    # UnboundLocalError at the return statement.
    parameter_to_be_evaluated = None

    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        parameter_to_be_evaluated = INFLOW_DIRECTION

    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        parameter_to_be_evaluated = OUTFLOW_DIRECTION

    else:
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, which is not defined in E1.get_results()."
        )

    return parameter_to_be_evaluated
580

581

582
def get_tuple_for_oemof_results(asset_label, asset_group, bus):
    r"""
    Determines the tuple with which to access the oemof-solph results

    The order of the parameters in the tuple depends on the direction of the flow.
    If the asset is defined...
    a) ...by its influx from a bus, the bus has to be named first in the tuple
    b) ...by its outflux into a bus, the asset has to be named first in the tuple

    Parameters
    ----------
    asset_label: str
        Name of the asset

    asset_group: str
        Asset group the asset belongs to

    bus: str
        Bus that is to be accessed for the asset´s information

    Returns
    -------
    flow_tuple: tuple of str or None
        Keys to be accessed in the oemof-solph results.
        `None` if `asset_group` is not a known asset group (a warning is logged).

    Notes
    -----
    Tested with
    - test_get_tuple_for_oemof_results()
    """
    # Bugfix: default to None so an unknown asset group no longer raises an
    # UnboundLocalError at the return statement.
    flow_tuple = None

    # Determine which flux is evaluated for the flow
    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        flow_tuple = (bus, asset_label)
    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        flow_tuple = (asset_label, bus)
    else:
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, but it is not defined in E1.get_results() which flux is to be evaluated."
        )

    return flow_tuple
623

624

625
def get_optimal_cap(bus, dict_asset, flow_tuple):
    r"""
    Retrieves optimized capacity of asset specified in `dict_asset`.

    Parameters
    ----------
    bus : dict
        Contains information about the busses linked to the asset specified in
        `dict_asset`. Information about the scalars like investment or initial
        capacity in key 'scalars' (pd.Series) and the flows between the
        component and the busses in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Key of the oemof-solph outputs dict mapping the value to be evaluated

    Returns
    -------
    Indirectly updated `dict_asset` with optimal capacity to be added
    ('optimizedAddCap').

    TODOS
    ^^^^^
    * direction as optimal parameter or with default value None (direction is
        not needed if 'optimizeCap' is not in `dict_asset` or if it's value is False

    """
    # Nothing to evaluate for assets without a capacity optimization setting.
    if OPTIMIZE_CAP not in dict_asset:
        return

    invest_key = (flow_tuple, OEMOF_INVEST)
    capacity_was_optimized = (
        dict_asset[OPTIMIZE_CAP][VALUE] is True and invest_key in bus[OEMOF_SCALARS]
    )

    if capacity_was_optimized is False:
        # only set a default optimized add cap value if the key does not exist already
        # this prevent erasing the value in case of multiple in/output busses
        if OPTIMIZED_ADD_CAP not in dict_asset:
            dict_asset[OPTIMIZED_ADD_CAP] = {VALUE: 0, UNIT: dict_asset[UNIT]}
        return

    optimal_capacity = cut_below_micro(
        bus[OEMOF_SCALARS][invest_key], dict_asset[LABEL]
    )
    if TIMESERIES_PEAK not in dict_asset:
        dict_asset[OPTIMIZED_ADD_CAP] = {
            VALUE: optimal_capacity,
            UNIT: dict_asset[UNIT],
        }
    elif dict_asset[TIMESERIES_PEAK][VALUE] > 0:
        # Normalize by the timeseries peak for assets defined via timeseries.
        dict_asset[OPTIMIZED_ADD_CAP] = {
            VALUE: optimal_capacity / dict_asset[TIMESERIES_PEAK][VALUE],
            UNIT: dict_asset[UNIT],
        }
    else:
        logging.warning(
            "Time series peak of asset %s negative or zero! Check timeseries. "
            "No optimized capacity derived.",
            dict_asset[LABEL],
        )
    logging.debug(
        "Accessed optimized capacity of asset %s: %s",
        dict_asset[LABEL],
        optimal_capacity,
    )
701

702

703
def get_flow(settings, bus, dict_asset, flow_tuple, multi_bus=None):
    r"""
    Adds flow of `bus` and total flow amongst other information to `dict_asset`.

    Depending on `direction` the input or the output flow is used.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus : dict
        Contains information about a specific bus. Information about the scalars, if they exist,
            like investment or initial capacity in key 'scalars' (pd.Series) and the
            flows between the component and the bus(ses) in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Entry of the oemof-solph outputs to be evaluated

    multi_bus: str or None
        The name of the current bus (for asset connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the flow of `bus`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow').

    """
    # Extract the raw flow from the oemof results and trim solver noise.
    raw_flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)]
    trimmed_flow = cut_below_micro(raw_flow, dict_asset[LABEL] + FLOW)
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset,
        flow=trimmed_flow,
        bus_name=multi_bus,
    )

    # For multi-bus assets, TOTAL_FLOW is keyed per bus.
    if multi_bus is None:
        total_flow = dict_asset[TOTAL_FLOW][VALUE]
        bus_info = ""
    else:
        total_flow = dict_asset[TOTAL_FLOW][multi_bus]
        bus_info = f" for bus '{multi_bus}'"

    logging.debug(
        "Accessed simulated timeseries of asset %s (total sum: %s)%s",
        dict_asset[LABEL],
        round(total_flow),
        bus_info,
    )
758

759

760
def add_info_flows(evaluated_period, dict_asset, flow, type=None, bus_name=None):
    r"""
    Adds `flow` and total flow amongst other information to `dict_asset`.

    Parameters
    ----------
    evaluated_period : int
        The number of days simulated with the energy system model.
    dict_asset : dict
        Contains information about the asset `flow` belongs to.
    flow : pd.Series
        Time series of the flow.
    type: str, default: None
        type of the flow, only exception is "STORAGE_CAPACITY".
        NOTE: the parameter name shadows the builtin ``type``; it is kept
        unchanged for backward compatibility with existing callers.
    bus_name: str or None
        The name of the current bus (for asset connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the `flow`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow'). As storage capacity is not a flow, an aggregation
    of the timeseries does not make sense and the parameters TOTAL_FLOW,
    ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW are added set to None.

    Notes
    -----

    Tested with:
    - E1.test_add_info_flows_365_days()
    - E1.test_add_info_flows_1_day()
    - E1.test_add_info_flows_storage_capacity()
    """
    total_flow = sum(flow)
    # Extrapolate the simulated period to a full year of 365 days
    annual_total_flow = total_flow * 365 / evaluated_period

    # Store the timeseries itself: directly for a single-bus asset, keyed by
    # bus name for assets connected to several buses.
    if bus_name is None:
        dict_asset[FLOW] = flow
    elif FLOW not in dict_asset:
        dict_asset[FLOW] = {bus_name: flow}
    else:
        dict_asset[FLOW][bus_name] = flow

    if type == STORAGE_CAPACITY:
        # The oemof-solph "flow" connected to the storage capacity describes the
        # energy stored in the storage asset, not an actual flow. As such, the
        # aggregated parameters are non-sensical, especially TOTAL_FLOW and
        # ANNUAL_TOTAL_FLOW; PEAK_FLOW and AVERAGE_FLOW are, as a consequence,
        # also not captured. Instead, the AVERAGE_SOC is calculated in a later
        # processing step.
        for parameter in [TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW]:
            dict_asset.update({parameter: {VALUE: None, UNIT: "NaN"}})
        return

    if bus_name is None:
        # Single-bus asset: aggregated values are stored directly under VALUE
        dict_asset.update(
            {
                TOTAL_FLOW: {VALUE: total_flow, UNIT: "kWh"},
                ANNUAL_TOTAL_FLOW: {VALUE: annual_total_flow, UNIT: "kWh"},
                PEAK_FLOW: {VALUE: max(flow), UNIT: "kW"},
                AVERAGE_FLOW: {VALUE: flow.mean(), UNIT: "kW"},
            }
        )
        return

    # Multi-bus asset: keep one entry per bus and accumulate the overall
    # aggregate under VALUE across all buses of the asset.
    if TOTAL_FLOW not in dict_asset:
        dict_asset[TOTAL_FLOW] = {bus_name: total_flow, VALUE: total_flow, UNIT: "kWh"}
    else:
        dict_asset[TOTAL_FLOW][bus_name] = total_flow
        # Bug fix: the accumulated TOTAL_FLOW must grow by the plain total flow
        # of this bus (consistent with its initialization above), not by the
        # annualized value `total_flow * 365 / evaluated_period` as before.
        dict_asset[TOTAL_FLOW][VALUE] += total_flow

    if ANNUAL_TOTAL_FLOW not in dict_asset:
        dict_asset[ANNUAL_TOTAL_FLOW] = {
            bus_name: annual_total_flow,
            VALUE: annual_total_flow,
            UNIT: "kWh",
        }
    else:
        dict_asset[ANNUAL_TOTAL_FLOW][bus_name] = annual_total_flow
        dict_asset[ANNUAL_TOTAL_FLOW][VALUE] += annual_total_flow

    if PEAK_FLOW not in dict_asset:
        dict_asset[PEAK_FLOW] = {bus_name: max(flow), UNIT: "kW"}
    else:
        dict_asset[PEAK_FLOW][bus_name] = max(flow)

    if AVERAGE_FLOW not in dict_asset:
        dict_asset[AVERAGE_FLOW] = {bus_name: flow.mean(), UNIT: "kW"}
    else:
        dict_asset[AVERAGE_FLOW][bus_name] = flow.mean()

858

859
def convert_demand_to_dataframe(dict_values, sector_demands=None):
    """Build the demands table of the report as a dataframe.

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    sector_demands: str
        Name of the sector of the energy system whose demands must be returned
        as a df by this function
        Default: None

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Work on a deep copy of the consumption sub-dict so the MVS results
    # themselves stay untouched
    demands = copy.deepcopy(dict_values[ENERGY_CONSUMPTION])

    # If a sector was requested, keep only the demands of that sector
    if sector_demands is not None:
        foreign_sector_keys = [
            key
            for key in demands
            if demands[key][ENERGY_VECTOR] != sector_demands.title()
        ]
        for key in foreign_sector_keys:
            del demands[key]

    # Excess sinks and DSO feed-in sinks are modelling artifacts, not actual
    # demands; collect their labels and drop them
    not_a_demand = [
        label for label in demands if EXCESS_SINK in label or DSO_FEEDIN in label
    ]
    for label in not_a_demand:
        del demands[label]

    # One row per remaining demand with the characteristics shown in the report
    demand_data = {
        dem: [
            demands[dem][UNIT],
            demands[dem][ENERGY_VECTOR],
            demands[dem][TIMESERIES_PEAK][VALUE],
            demands[dem][TIMESERIES_AVERAGE][VALUE],
            demands[dem][TIMESERIES_TOTAL][VALUE],
        ]
        for dem in demands
    }

    df_dem = pd.DataFrame.from_dict(
        demand_data,
        orient="index",
        columns=[
            UNIT,
            "Type of Demand",
            "Peak Demand",
            "Mean Demand",
            "Total Annual Demand",
        ],
    )

    # Move the demand names out of the index and round for display
    df_dem.index.name = "Demands"
    df_dem = df_dem.reset_index()
    return df_dem.round(2)
942

943

944
def convert_components_to_dataframe(dict_values):
    """Build the component table of the report as a dataframe.

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    Notes
    -----

    Tested with:
        - test_E1_process_results.test_convert_components_to_dataframe()
    """
    # Rows of the component table, keyed by component label
    components = {}

    # energyProduction and energyConversion share the same tree structure,
    # so their rows can be assembled in a single pass
    for asset_group in (ENERGY_PRODUCTION, ENERGY_CONVERSION):
        for asset_name, asset in dict_values[asset_group].items():
            components[asset_name] = [
                asset[OEMOF_ASSET_TYPE],
                asset[ENERGY_VECTOR],
                asset[UNIT],
                asset[INSTALLED_CAP][VALUE],
                translate_optimizeCap_from_boolean_to_yes_no(
                    asset[OPTIMIZE_CAP][VALUE]
                ),
            ]

    # Energy storage assets nest their sub-components one level deeper; each
    # sub-component (charge power, capacity, discharge power) gets its own row
    for storage in dict_values[ENERGY_STORAGE].values():
        # Currently, the storage optimization setting applies to all
        # sub-components. Can be re-used when storage asset sub-components can
        # be optimized individually:
        # storage[sub_component][OPTIMIZE_CAP][VALUE]
        optimize = translate_optimizeCap_from_boolean_to_yes_no(
            storage[OPTIMIZE_CAP][VALUE]
        )
        for sub_component in (INPUT_POWER, STORAGE_CAPACITY, OUTPUT_POWER):
            components[storage[sub_component][LABEL]] = [
                storage[OEMOF_ASSET_TYPE],
                storage[ENERGY_VECTOR],
                storage[sub_component][INSTALLED_CAP][UNIT],
                storage[sub_component][INSTALLED_CAP][VALUE],
                optimize,
            ]

    df_comp = pd.DataFrame.from_dict(
        components,
        orient="index",
        columns=[
            "Type of Component",
            "Energy Vector",
            UNIT,
            "Installed Capacity",
            "Capacity optimization",
        ],
    )
    df_comp.index.name = "Component"
    return df_comp.reset_index()
1043

1044

1045
def translate_optimizeCap_from_boolean_to_yes_no(optimize_cap):
    r"""
    Translates the boolean OPTIMIZE_CAP to a yes-no value for readability of auto report

    Parameters
    ----------
    optimize_cap: bool
        Setting whether asset is optimized or not

    Returns
    -------
    optimize: str
        If OPTIMIZE_CAP==True: "Yes", else "No".

    Notes
    -----
    Tested with:
    - test_E1_process_results.test_translate_optimizeCap_from_boolean_to_yes_no()
    """
    # Only the exact boolean True maps to "Yes"; any other value (False, None,
    # truthy non-booleans) yields "No" — the identity check is intentional.
    return "Yes" if optimize_cap is True else "No"
1069

1070

1071
def convert_scalar_matrix_to_dataframe(dict_values):
    """Build the scalar matrix table of the report as a dataframe.

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Start from the scalar matrix KPIs with a fresh 0,1,2,... index, drop the
    # columns not displayed in the report, relabel the remaining columns for
    # readability and round to two decimals for display.
    df_scalar_matrix = (
        dict_values[KPI][KPI_SCALAR_MATRIX]
        .reset_index()
        .drop(columns=["index", TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW])
        .rename(
            columns={
                LABEL: "Component/Parameter",
                OPTIMIZED_ADD_CAP: "CAP",
                ANNUAL_TOTAL_FLOW: "Aggregated Flow",
            }
        )
        .round(2)
    )
    return df_scalar_matrix
1108

1109

1110
def convert_cost_matrix_to_dataframe(dict_values):
    """Build the cost matrix table of the report as a dataframe.

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Start from the cost matrix KPIs with a fresh 0,1,2,... index, keep only
    # total and upfront costs, relabel the columns for readability and round
    # to two decimals for display.
    df_cost_matrix = (
        dict_values[KPI][KPI_COST_MATRIX]
        .reset_index()
        .drop(
            columns=[
                "index",
                COST_OPERATIONAL_TOTAL,
                COST_INVESTMENT,
                COST_DISPATCH,
                COST_OM,
            ]
        )
        .rename(
            columns={
                LABEL: "Component",
                COST_TOTAL: "Total costs",
                COST_UPFRONT: "Upfront Investment Costs",
            }
        )
        .round(2)
    )
    return df_cost_matrix
1148

1149

1150
def convert_costs_to_dataframe(dict_values):
    """Build the dataframe backing the costs piecharts of the report.

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Restrict the cost matrix to the parameters needed for the piecharts
    cost_columns = [LABEL, ANNUITY_TOTAL, COST_INVESTMENT, COST_OPERATIONAL_TOTAL]
    df_pie_plot = dict_values[KPI][KPI_COST_MATRIX][cost_columns]

    # Append one row holding the column-wise sums
    totals_row = df_pie_plot.sum().to_frame().T
    df_pie_plot = pd.concat([df_pie_plot, totals_row])

    # Label the totals row (first column is the label column)
    df_pie_plot.iloc[-1, 0] = "Total"

    return df_pie_plot
1179

1180

1181
def convert_scalars_to_dataframe(dict_values):
    """
    Processes the scalar system-wide KPI so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_scalars_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_SCALARS_DICT does not hold any units, the table printed in the report is unit-les.
    """
    scalar_kpis = dict_values[KPI][KPI_SCALARS_DICT]

    # Look up a unit for each KPI from the economic project data
    units_cost_kpi = get_units_of_cost_matrix_entries(
        dict_values[ECONOMIC_DATA], scalar_kpis
    )

    # One row of values, then transpose so each KPI becomes a row
    kpi_scalars_dataframe = pd.DataFrame(scalar_kpis, index=[VALUE])
    kpi_names = kpi_scalars_dataframe.columns
    kpi_scalars_dataframe = kpi_scalars_dataframe.T

    # Attach name and unit columns and order the columns for the report table
    kpi_scalars_dataframe[KPI] = kpi_names
    kpi_scalars_dataframe[UNIT] = units_cost_kpi
    return kpi_scalars_dataframe[[KPI, UNIT, VALUE]]
1214

1215

1216
def convert_kpi_sector_to_dataframe(dict_values):
    """
    Processes the sector KPIs so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_sectors_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_UNCOUPLED_DICT does not hold any units, the table printed in the report is unit-les.
    """
    kpi_uncoupled = dict_values[KPI][KPI_UNCOUPLED_DICT]

    # The uncoupled KPIs may be stored as a dict or already as a dataframe
    if isinstance(kpi_uncoupled, dict):
        kpi_sectors_dataframe = pd.DataFrame.from_dict(kpi_uncoupled, orient="index")
    else:
        kpi_sectors_dataframe = kpi_uncoupled

    # Move the KPI names from the index into a leading "KPI" column for display
    original_columns = list(kpi_sectors_dataframe.columns)
    kpi_sectors_dataframe["KPI"] = kpi_sectors_dataframe.index
    kpi_sectors_dataframe = kpi_sectors_dataframe[["KPI"] + original_columns]

    return kpi_sectors_dataframe
1247

1248

1249
def get_units_of_cost_matrix_entries(dict_economic, kpi_list):
    """
    Determines the units of the costs KPI to be stored to :class: DataFrame.

    Parameters
    ----------
    dict_economic:
        Economic project data

    kpi_list:
        List of cost matrix entries

    Returns
    -------
    unit_list: list
        List of units for the :class: DataFrame to be created
    """
    # All cost KPIs share the project currency; annuities are per year
    currency = dict_economic[CURR]
    currency_per_year = currency + "/" + UNIT_YEAR

    kpi_cost_unit_dict = {
        LABEL: None,
        UNIT: None,
        COST_TOTAL: currency,
        COST_OPERATIONAL_TOTAL: currency,
        COST_INVESTMENT: currency,
        COST_UPFRONT: currency,
        COST_DISPATCH: currency,
        COST_OM: currency,
        ANNUITY_TOTAL: currency_per_year,
        ANNUITY_OM: currency_per_year,
        LCOE_ASSET: currency + "/" + "energy carrier unit",
    }

    # Unknown KPIs are reported as "NA"; known ones may map to None (no unit)
    return [kpi_cost_unit_dict.get(key, "NA") for key in kpi_list]
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc