• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

rl-institut / multi-vector-simulator / 4084543790

pending completion
4084543790

push

github

GitHub
Merge pull request #952 from rl-institut/fix/chp_component

152 of 152 new or added lines in 9 files covered. (100.0%)

5899 of 7665 relevant lines covered (76.96%)

0.77 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

67.02
/src/multi_vector_simulator/E1_process_results.py
1
"""
2
Module E1 process results
3
=========================
4

5
Module E1 processes the oemof results.
6
- receive time series per bus for all assets
7
- write time series to dictionary
8
- get optimal capacity of optimized assets
9
- add the evaluation of time series
10

11
"""
12
import logging
1✔
13
import copy
1✔
14
import pandas as pd
1✔
15

16
from multi_vector_simulator.utils.constants import TYPE_NONE, TOTAL_FLOW
1✔
17
from multi_vector_simulator.utils.constants_json_strings import (
1✔
18
    ECONOMIC_DATA,
19
    FLOW,
20
    INSTALLED_CAP,
21
    INPUT_POWER,
22
    OUTPUT_POWER,
23
    STORAGE_CAPACITY,
24
    TIME_INDEX,
25
    INFLOW_DIRECTION,
26
    OUTFLOW_DIRECTION,
27
    KPI_SCALARS_DICT,
28
    OPTIMIZED_FLOWS,
29
    UNIT,
30
    CURR,
31
    UNIT_YEAR,
32
    ENERGY_CONSUMPTION,
33
    LABEL,
34
    VALUE,
35
    OPTIMIZE_CAP,
36
    SIMULATION_SETTINGS,
37
    EVALUATED_PERIOD,
38
    TIMESERIES_PEAK,
39
    TIMESERIES_TOTAL,
40
    TIMESERIES_AVERAGE,
41
    DSO_FEEDIN,
42
    EXCESS_SINK,
43
    ENERGY_CONVERSION,
44
    ENERGY_PRODUCTION,
45
    ENERGY_STORAGE,
46
    OEMOF_ASSET_TYPE,
47
    ENERGY_VECTOR,
48
    KPI,
49
    KPI_COST_MATRIX,
50
    KPI_SCALAR_MATRIX,
51
    TOTAL_FLOW,
52
    PEAK_FLOW,
53
    AVERAGE_FLOW,
54
    OPTIMIZED_ADD_CAP,
55
    ANNUAL_TOTAL_FLOW,
56
    COST_OPERATIONAL_TOTAL,
57
    COST_INVESTMENT,
58
    COST_DISPATCH,
59
    COST_OM,
60
    COST_TOTAL,
61
    COST_UPFRONT,
62
    ANNUITY_TOTAL,
63
    ANNUITY_OM,
64
    LCOE_ASSET,
65
    EMISSION_FACTOR,
66
    TOTAL_EMISSIONS,
67
    TIMESERIES_SOC,
68
    KPI_UNCOUPLED_DICT,
69
    FIX_COST,
70
    LIFETIME_PRICE_DISPATCH,
71
    AVERAGE_SOC,
72
)
73

74
# Keys used to access the oemof.solph result dictionaries
OEMOF_FLOW = "flow"
OEMOF_SEQUENCES = "sequences"
OEMOF_INVEST = "invest"
OEMOF_SCALARS = "scalars"
OEMOF_STORAGE_CONTENT = "storage_content"

81
# Determines which assets are defined by...
# a) an influx from a bus
ASSET_GROUPS_DEFINED_BY_INFLUX = [ENERGY_CONSUMPTION]
# b) an outflux into a bus
ASSET_GROUPS_DEFINED_BY_OUTFLUX = [ENERGY_CONVERSION, ENERGY_PRODUCTION]
86

87
# Threshold for precision limit:
THRESHOLD = 10 ** (-6)


def cut_below_micro(value, label):
    r"""
    Function trims results of oemof optimization to positive values and rounds to 0, if within a certain precision threshold (of -10^-6)

    Oemof termination is dependent on the simulation settings of oemof solph. Thus, it can terminate the optimization if the results are within certain bounds, which can sometimes lead to negative decision variables (capacities, flows). Negative values do not make sense in this context. If the values are between -10^-6 and 0, we assume that they can be rounded to 0, as they result from the precision settings of the solver. In that case the value is overwritten for the further post-processing. This should also avoid SOC timeseries with doubtful values outside of [0,1]. If any value is a higher negative value than the threshold, its value is not changed but a warning raised.
    Similarly, if a positive decision variable is detected that has a value lower than the threshold, it is assumed that this only happens because of the solver settings, and the values below the threshold are rounded to 0.

    Parameters
    ----------
    value: float or pd.Series
        Decision variable determined by oemof

    label: str
        String to be mentioned in the debug messages

    Returns
    -------
    value: float or pd.Series
        Decision variable with rounded values in case that slight negative values or positive values were observed.

    Notes
    -----

    Tested with:
    - E1.test_cut_below_micro_scalar_value_below_0_larger_threshold
    - E1.test_cut_below_micro_scalar_value_below_0_smaller_threshold
    - E1.test_cut_below_micro_scalar_value_0
    - E1.test_cut_below_micro_scalar_value_larger_0
    - E1.test_cut_below_micro_scalar_value_larger_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_larger_threshold
    - E1.test_cut_below_micro_pd_Series_below_0_smaller_threshold
    - E1.test_cut_below_micro_pd_Series_0
    - E1.test_cut_below_micro_pd_Series_larger_0
    - E1.test_cut_below_micro_pd_Series_larger_0_smaller_threshold
    """
    text_block_start = f"The value of {label} is below 0"
    text_block_set_0 = f"Negative value (s) are smaller than {-THRESHOLD}. This is likely a result of the termination/precision settings of the cbc solver. As the difference is marginal, the value will be set to 0. "
    text_block_oemof = "This is so far below 0, that the value is not changed. All oemof decision variables should be positive so this needs to be investigated. "

    logging.debug(
        f"Check if the dispatch of asset {label} as per the oemof results is within the defined margin of precision ({THRESHOLD})"
    )

    # flows
    if isinstance(value, pd.Series):
        # Identifies any negative values. Decision variables should not have a negative value
        if (value < 0).any():
            log_msg = text_block_start
            # Counts the incidents, in which the value is below 0.
            # (A redundant nested isinstance(value, pd.Series) check was removed here;
            # this branch is only reached for pd.Series anyway.)
            instances = sum(value < 0)
            log_msg += f" in {instances} instances. "
            # Checks that all values are at least within the threshold for negative values.
            if (value > -THRESHOLD).all():
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = value.clip(lower=0)
            # If any value has a large negative value (lower than threshold), no values are changed.
            else:
                test = value.clip(upper=-THRESHOLD).abs()
                log_msg += f"At least one value exceeds the scale of {-THRESHOLD}. The highest negative value is -{max(test)}. "
                log_msg += text_block_oemof
                logging.warning(log_msg)

        # Determine if there are any positive values that are between 0 and the threshold:
        # Clip to interval
        positive_threshold = value.clip(lower=0, upper=THRESHOLD)
        # Determine instances in which bounds are met: 1=either 0 or larger threshold, 0=smaller threshold
        positive_threshold = (positive_threshold == 0) + (
            positive_threshold == THRESHOLD
        )
        # Instances in which values are in determined interval:
        instances = len(value) - sum(positive_threshold)
        if instances > 0:
            logging.debug(
                f"There are {instances} instances in which there are positive values smaller then the threshold."
            )
            # Multiply with positive_threshold (1=either 0 or larger threshold, 0=smaller threshold)
            value = value * positive_threshold

    # capacities
    else:
        # Value is lower 0, which should not be possible for decision variables
        if value < 0:
            log_msg = text_block_start
            # Value within [-10**(-6), 0], ie. so small that it can be neglected.
            if value > -THRESHOLD:
                log_msg += text_block_set_0
                logging.debug(log_msg)
                value = 0
            # Value is below 0 but already large enough that it should not be neglected.
            else:
                log_msg += f"The value exceeds the scale of {-THRESHOLD}, with {value}."
                log_msg += text_block_oemof
                logging.warning(log_msg)
        # Value is above 0 but below threshold, should be rounded
        elif value < THRESHOLD:
            logging.debug(
                f"The positive value {value} is below the {THRESHOLD}, and rounded to 0."
            )
            value = 0

    return value
195

196

197
def get_timeseries_per_bus(dict_values, bus_data):
    r"""
    Reads simulation results of all busses and stores time series.

    Parameters
    ----------
    dict_values : dict
        Contains all input data of the simulation.
    bus_data : dict Contains information about all busses in a nested dict.

        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    Notes
    -----
    Tested with:
    - test_get_timeseries_per_bus_two_timeseries_for_directly_connected_storage()

    #Todo: This is a duplicate of the `E1.get_flow()` assertions, and thus `E1.cut_below_micro` is applied twice for each flow. This should rather be merged into the other functions.

    Returns
    -------
    Indirectly updated `dict_values` with 'optimizedFlows' - one data frame for each bus.

    """
    logging.debug(
        "Time series for plots and 'timeseries.xlsx' are added to `dict_values[OPTIMIZED_FLOWS]` in `E1.get_timeseries_per_bus`; check there in case of problems."
    )
    flows_per_bus = {}
    time_index = dict_values[SIMULATION_SETTINGS][TIME_INDEX]
    for bus_label, bus_results in bus_data.items():
        bus_df = pd.DataFrame(index=time_index)
        flows_per_bus[bus_label] = bus_df
        sequences = bus_results[OEMOF_SEQUENCES]

        # Flows feeding into the bus: keys of the form ((asset, bus), "flow")
        inflows = {
            seq_key[0][0]: seq_key
            for seq_key in sequences.keys()
            if seq_key[0][1] == bus_label and seq_key[1] == OEMOF_FLOW
        }
        for asset_label, seq_key in inflows.items():
            bus_df[asset_label] = cut_below_micro(
                sequences[seq_key], bus_label + "/" + asset_label
            )

        # Flows drawing from the bus: keys of the form ((bus, asset), "flow")
        outflows = {
            seq_key[0][1]: seq_key
            for seq_key in sequences.keys()
            if seq_key[0][0] == bus_label and seq_key[1] == OEMOF_FLOW
        }
        for asset_label, seq_key in outflows.items():
            if asset_label in bus_df.columns:
                # `asset_label` was already added as an inflow: this occurs for
                # storages that are directly connected to the bus. Rename the
                # existing (discharging) column ...
                bus_df.rename(
                    columns={asset_label: " ".join([asset_label, OUTPUT_POWER])},
                    inplace=True,
                )
                # ... and add the charging/input power of the storage asset:
                bus_df[" ".join([asset_label, INPUT_POWER])] = -sequences[seq_key]
            else:
                # Not a storage asset; outflows are stored with negative sign
                bus_df[asset_label] = -sequences[seq_key]

    dict_values.update({OPTIMIZED_FLOWS: flows_per_bus})
270

271

272
def _set_optimized_add_cap(sub_asset_dict, capacity_value):
    r"""Stores `capacity_value` under OPTIMIZED_ADD_CAP in `sub_asset_dict`, labelled with the sub-asset's own unit."""
    sub_asset_dict.update(
        {
            OPTIMIZED_ADD_CAP: {
                VALUE: capacity_value,
                UNIT: sub_asset_dict[UNIT],
            }
        }
    )


def get_storage_results(settings, storage_bus, dict_asset):
    r"""
    Reads storage results of simulation and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').
    storage_bus : dict
        Contains information about the storage bus. Information about the scalars
        like investment or initial capacity in key 'scalars' (pd.Series) and the
        flows between the component and the busses in key 'sequences' (pd.DataFrame).
    dict_asset : dict
        Contains information about the storage like capacity, charging power, etc.

    Returns
    -------
    Indirectly updates `dict_asset` with simulation results concerning the
    storage.

    """
    # Charging power: flow from the inflow bus into the storage component
    power_charge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_FLOW)
    ]
    power_charge = cut_below_micro(power_charge, dict_asset[LABEL] + " charge flow")
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[INPUT_POWER],
        flow=power_charge,
    )

    # Discharging power: flow from the storage component into the outflow bus
    power_discharge = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_FLOW)
    ]
    power_discharge = cut_below_micro(
        power_discharge, dict_asset[LABEL] + " discharge flow"
    )
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[OUTPUT_POWER],
        flow=power_discharge,
    )

    # Stored energy ("storage_content" in oemof-solph), not an actual flow
    storage_capacity = storage_bus[OEMOF_SEQUENCES][
        ((dict_asset[LABEL], TYPE_NONE), OEMOF_STORAGE_CONTENT)
    ]
    storage_capacity = cut_below_micro(
        storage_capacity, dict_asset[LABEL] + " " + STORAGE_CAPACITY
    )
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset[STORAGE_CAPACITY],
        flow=storage_capacity,
        type=STORAGE_CAPACITY,
    )

    if OPTIMIZE_CAP in dict_asset:
        if dict_asset[OPTIMIZE_CAP][VALUE] is True:
            # Read the optimized additional capacity of each sub-asset from the
            # oemof invest results and store it with the sub-asset's unit.
            for sub_asset, invest_key in (
                (
                    INPUT_POWER,
                    ((dict_asset[INFLOW_DIRECTION], dict_asset[LABEL]), OEMOF_INVEST),
                ),
                (
                    OUTPUT_POWER,
                    ((dict_asset[LABEL], dict_asset[OUTFLOW_DIRECTION]), OEMOF_INVEST),
                ),
                (STORAGE_CAPACITY, ((dict_asset[LABEL], TYPE_NONE), OEMOF_INVEST)),
            ):
                optimized_capacity = storage_bus[OEMOF_SCALARS][invest_key]
                _set_optimized_add_cap(dict_asset[sub_asset], optimized_capacity)
                logging.debug(
                    "Accessed optimized capacity of asset %s: %s",
                    dict_asset[sub_asset][LABEL],
                    optimized_capacity,
                )

        else:
            # Capacities were not optimized: additional capacity defaults to 0.
            # Bugfix: INPUT_POWER and OUTPUT_POWER previously received the unit
            # of STORAGE_CAPACITY here (an energy unit instead of a power unit);
            # each sub-asset now carries its own unit, consistent with the
            # optimized branch above.
            for sub_asset in (INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY):
                _set_optimized_add_cap(dict_asset[sub_asset], 0)

    get_state_of_charge_info(dict_asset)
412

413

414
def get_state_of_charge_info(dict_asset):
    r"""
    Adds state of charge timeseries and average value of the timeseries to the storage dict.

    Parameters
    ----------
    dict_asset: dict
        Dict of the asset, specifically including the STORAGE_CAPACITY

    Returns
    -------
    Updated dict_asset

    Notes
    -----

    Tested with:
    - E1.test_get_state_of_charge_info()
    """
    # SOC = stored energy divided by total (installed + newly optimized) capacity
    total_capacity = (
        dict_asset[STORAGE_CAPACITY][INSTALLED_CAP][VALUE]
        + dict_asset[STORAGE_CAPACITY][OPTIMIZED_ADD_CAP][VALUE]
    )
    soc_series = dict_asset[STORAGE_CAPACITY][FLOW] / total_capacity
    dict_asset[TIMESERIES_SOC] = soc_series
    dict_asset[AVERAGE_SOC] = {VALUE: soc_series.mean(), UNIT: "factor"}
443

444

445
def get_results(settings, bus_data, dict_asset, asset_group):
    r"""
    Reads results of the asset defined in `dict_asset` and stores them in `dict_asset`.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus_data : dict
        Contains information about all busses in a nested dict.
        1st level keys: bus names;
        2nd level keys:

            'scalars': (pd.Series) (does not exist in all dicts)
            'sequences': (pd.DataFrame) - contains flows between components and busses

    dict_asset : dict
        Contains information about the asset.

    asset_group: str
       Asset group to which the evaluated asset belongs

    Returns
    -------
    Indirectly updates `dict_asset` with results.

    """
    # Get which parameter/bus needs to be evaluated
    parameter_to_be_evaluated = get_parameter_to_be_evaluated_from_oemof_results(
        asset_group, dict_asset[LABEL]
    )

    # Check if the parameter/bus is defined for dict_asset
    if parameter_to_be_evaluated not in dict_asset:
        # Bugfix: previously the warning printed dict_asset[LCOE_ASSET] (a KPI
        # entry) instead of the asset's label.
        logging.warning(
            f"The asset {dict_asset[LABEL]} of group {asset_group} should contain parameter {parameter_to_be_evaluated}, but it does not."
        )

    # Determine bus that needs to be evaluated
    bus_name = dict_asset[parameter_to_be_evaluated]

    # Determine flows of the asset, also if flows are connected to multiple busses
    if not isinstance(bus_name, list):
        flow_tuple = get_tuple_for_oemof_results(
            dict_asset[LABEL], asset_group, bus_name
        )

        # Get flow information
        get_flow(
            settings=settings,
            bus=bus_data[bus_name],
            dict_asset=dict_asset,
            flow_tuple=flow_tuple,
        )
        # Get capacity information
        get_optimal_cap(bus_data[bus_name], dict_asset, flow_tuple)

    else:
        # Asset is connected to multiple busses, evaluate all
        for bus_instance in bus_name:
            flow_tuple = get_tuple_for_oemof_results(
                dict_asset[LABEL], asset_group, bus_instance
            )
            get_flow(
                settings=settings,
                bus=bus_data[bus_instance],
                dict_asset=dict_asset,
                flow_tuple=flow_tuple,
                multi_bus=bus_instance,
            )
            # Get capacity information
            get_optimal_cap(bus_data[bus_instance], dict_asset, flow_tuple)

        # For assets with multiple output busses: aggregate flows over all busses
        if parameter_to_be_evaluated == OUTFLOW_DIRECTION:
            cumulative_flow = 0
            for bus_instance in bus_name:
                cumulative_flow += dict_asset[FLOW][bus_instance]
            dict_asset[PEAK_FLOW][VALUE] = max(cumulative_flow)
            # Bugfix: the mean of the cumulative flow previously overwrote
            # PEAK_FLOW instead of being stored as AVERAGE_FLOW.
            dict_asset[AVERAGE_FLOW][VALUE] = cumulative_flow.mean()
        elif parameter_to_be_evaluated == INFLOW_DIRECTION:
            logging.error(
                "The result processing of asset with multiple inputs might not be done correctly"
            )
532

533

534
def get_parameter_to_be_evaluated_from_oemof_results(asset_group, asset_label):
    r"""
    Determine the parameter that needs to be evaluated to determine an asset`s optimized flow and capacity.

    Parameters
    ----------
    asset_group: str
        Asset group to which the evaluated asset belongs

    asset_label: str
        Label of the asset, needed for log message

    Returns
    -------
    parameter_to_be_evaluated: str or None
        Parameter that will be processed to get the dispatch and capacity of an asset;
        None if `asset_group` belongs to neither of the known asset group lists.

    Notes
    -----
    Tested by:
    - test_get_parameter_to_be_evaluated_from_oemof_results()
    """
    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        parameter_to_be_evaluated = INFLOW_DIRECTION

    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        parameter_to_be_evaluated = OUTFLOW_DIRECTION

    else:
        # Bugfix: the variable was previously left undefined in this branch,
        # so the return statement raised UnboundLocalError right after logging.
        parameter_to_be_evaluated = None
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, which is not defined in E1.get_results()."
        )

    return parameter_to_be_evaluated
568

569

570
def get_tuple_for_oemof_results(asset_label, asset_group, bus):
    r"""
    Determines the tuple with which to access the oemof-solph results

    The order of the parameters in the tuple depends on the direction of the flow.
    If the asset is defined...
    a) ...by its influx from a bus, the bus has to be named first in the tuple
    b) ...by its outflux into a bus, the asset has to be named first in the tuple

    Parameters
    ----------
    asset_label: str
        Name of the asset

    asset_group: str
        Asset group the asset belongs to

    bus: str
        Bus that is to be accessed for the asset's information

    Returns
    -------
    flow_tuple: tuple of str or None
        Keys to be accessed in the oemof-solph results;
        None if `asset_group` belongs to neither of the known asset group lists.

    Notes
    -----
    Tested with
    - test_get_tuple_for_oemof_results()
    """
    # Determine which flux is evaluated for the flow
    if asset_group in ASSET_GROUPS_DEFINED_BY_INFLUX:
        flow_tuple = (bus, asset_label)
    elif asset_group in ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        flow_tuple = (asset_label, bus)
    else:
        # Bugfix: `flow_tuple` was previously left undefined in this branch,
        # so the return statement raised UnboundLocalError right after logging.
        flow_tuple = None
        logging.warning(
            f"The asset {asset_label} is of group {asset_group}, but it is not defined in E1.get_results() which flux is to be evaluated."
        )

    return flow_tuple
611

612

613
def get_optimal_cap(bus, dict_asset, flow_tuple):
    r"""
    Retrieves optimized capacity of asset specified in `dict_asset`.

    Parameters
    ----------
    bus : dict
        Contains information about the busses linked to the asset specified in
        `dict_asset`. Information about the scalars like investment or initial
        capacity in key 'scalars' (pd.Series) and the flows between the
        component and the busses in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Key of the oemof-solph outputs dict mapping the value to be evaluated

    Returns
    -------
    Indirectly updated `dict_asset` with optimal capacity to be added
    ('optimizedAddCap').

    TODOS
    ^^^^^
    * direction as optimal parameter or with default value None (direction is
        not needed if 'optimizeCap' is not in `dict_asset` or if it's value is False

    """
    if OPTIMIZE_CAP not in dict_asset:
        return

    invest_key = (flow_tuple, OEMOF_INVEST)
    if dict_asset[OPTIMIZE_CAP][VALUE] is True and invest_key in bus[OEMOF_SCALARS]:
        optimal_capacity = cut_below_micro(
            bus[OEMOF_SCALARS][invest_key], dict_asset[LABEL]
        )
        if TIMESERIES_PEAK in dict_asset:
            # Assets with a normalized timeseries: rescale the invested
            # capacity by the timeseries peak
            peak = dict_asset[TIMESERIES_PEAK][VALUE]
            if peak > 0:
                dict_asset.update(
                    {
                        OPTIMIZED_ADD_CAP: {
                            VALUE: optimal_capacity / peak,
                            UNIT: dict_asset[UNIT],
                        }
                    }
                )
            else:
                logging.warning(
                    "Time series peak of asset %s negative or zero! Check timeseries. "
                    "No optimized capacity derived.",
                    dict_asset[LABEL],
                )
        else:
            dict_asset.update(
                {
                    OPTIMIZED_ADD_CAP: {
                        VALUE: optimal_capacity,
                        UNIT: dict_asset[UNIT],
                    }
                }
            )
        logging.debug(
            "Accessed optimized capacity of asset %s: %s",
            dict_asset[LABEL],
            optimal_capacity,
        )
    else:
        # only set a default optimized add cap value if the key does not exist already
        # this prevent erasing the value in case of multiple in/output busses
        if OPTIMIZED_ADD_CAP not in dict_asset:
            dict_asset.update({OPTIMIZED_ADD_CAP: {VALUE: 0, UNIT: dict_asset[UNIT]}})
                )
688

689

690
def get_flow(settings, bus, dict_asset, flow_tuple, multi_bus=None):
    r"""
    Adds flow of `bus` and total flow amongst other information to `dict_asset`.

    Depending on `direction` the input or the output flow is used.

    Parameters
    ----------
    settings : dict
        Contains simulation settings from `simulation_settings.csv` with
        additional information like the amount of time steps simulated in the
        optimization ('periods').

    bus : dict
        Contains information about a specific bus. Information about the scalars, if they exist,
            like investment or initial capacity in key 'scalars' (pd.Series) and the
            flows between the component and the bus(ses) in key 'sequences' (pd.DataFrame).

    dict_asset : dict
        Contains information about the asset.

    flow_tuple : tuple
        Entry of the oemof-solph outputs to be evaluated

    multi_bus: str or None
        The name of the current bus (for asset connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the flow of `bus`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow').

    """
    flow_series = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)]
    flow_series = cut_below_micro(flow_series, dict_asset[LABEL] + FLOW)
    add_info_flows(
        evaluated_period=settings[EVALUATED_PERIOD][VALUE],
        dict_asset=dict_asset,
        flow=flow_series,
        bus_name=multi_bus,
    )

    # For a single-bus asset the total flow is a plain value; for a multi-bus
    # asset it is stored per bus and the bus is mentioned in the log message.
    if multi_bus is None:
        total_flow, bus_info = dict_asset[TOTAL_FLOW][VALUE], ""
    else:
        total_flow, bus_info = (
            dict_asset[TOTAL_FLOW][multi_bus],
            f" for bus '{multi_bus}'",
        )

    logging.debug(
        "Accessed simulated timeseries of asset %s (total sum: %s)%s",
        dict_asset[LABEL],
        round(total_flow),
        bus_info,
    )
745

746

747
def add_info_flows(evaluated_period, dict_asset, flow, type=None, bus_name=None):
    r"""
    Adds `flow` and total flow amongst other information to `dict_asset`.

    Parameters
    ----------
    evaluated_period : int
        The number of days simulated with the energy system model.
    dict_asset : dict
        Contains information about the asset `flow` belongs to.
    flow : pd.Series
        Time series of the flow.
    type: str, default: None
        type of the flow, only exception is "STORAGE_CAPACITY".
    bus_name: str or None
        The name of the current bus (for asset connected to more than one bus)

    Returns
    -------
    Indirectly updates `dict_asset` with the `flow`, the total flow, the annual
    total flow, the maximum of the flow ('peak_flow') and the average value of
    the flow ('average_flow'). As Storage capacity is not a flow, an aggregation of the timeseries does not make sense
    and the parameters TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW are added set to None.

    Notes
    -----

    Tested with:
    - E1.test_add_info_flows_365_days()
    - E1.test_add_info_flows_1_day()
    - E1.test_add_info_flows_storage_capacity()
    """
    total_flow = sum(flow)
    # Factor to extrapolate the flow aggregated over the simulated period to a full year
    annual_factor = 365 / evaluated_period

    # Store the timeseries itself: flat entry for single-bus assets,
    # nested per bus name for assets connected to more than one bus
    if bus_name is None:
        dict_asset.update({FLOW: flow})
    else:
        if FLOW not in dict_asset:
            dict_asset.update({FLOW: {bus_name: flow}})
        else:
            dict_asset[FLOW][bus_name] = flow

    if type == STORAGE_CAPACITY:
        # The oemof-solph "flow" connected to the storage capacity describes the
        # energy stored in the storage asset, not the actual flow. As such, the
        # below parameters are non-sensical, especially TOTAL_FLOW and
        # ANNUAL_TOTAL_FLOW. PEAK_FLOW and AVERAGE_FLOW are, as a consequence,
        # also not captured. Instead, the AVERAGE_SOC is calculated in a later
        # processing step.
        for parameter in [TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW]:
            dict_asset.update({parameter: {VALUE: None, UNIT: "NaN"}})

    elif bus_name is None:
        # Single-bus asset: aggregated values stored directly under VALUE
        dict_asset.update(
            {
                TOTAL_FLOW: {VALUE: total_flow, UNIT: "kWh"},
                ANNUAL_TOTAL_FLOW: {
                    VALUE: total_flow * annual_factor,
                    UNIT: "kWh",
                },
                PEAK_FLOW: {VALUE: max(flow), UNIT: "kW"},
                AVERAGE_FLOW: {VALUE: flow.mean(), UNIT: "kW"},
            }
        )

    else:
        # Multi-bus asset: keep one entry per bus and accumulate the sum of all
        # buses under VALUE
        if TOTAL_FLOW not in dict_asset:
            dict_asset.update(
                {TOTAL_FLOW: {bus_name: total_flow, VALUE: total_flow, UNIT: "kWh"}}
            )
        else:
            dict_asset[TOTAL_FLOW][bus_name] = total_flow
            # Bugfix: accumulate the raw total flow here. Previously the
            # annualized value (total_flow * 365 / evaluated_period) was added,
            # which is inconsistent with the initialisation above
            # (VALUE: total_flow) and with ANNUAL_TOTAL_FLOW below.
            dict_asset[TOTAL_FLOW][VALUE] += total_flow

        if ANNUAL_TOTAL_FLOW not in dict_asset:
            dict_asset.update(
                {
                    ANNUAL_TOTAL_FLOW: {
                        bus_name: total_flow * annual_factor,
                        VALUE: total_flow * annual_factor,
                        UNIT: "kWh",
                    }
                }
            )
        else:
            dict_asset[ANNUAL_TOTAL_FLOW][bus_name] = total_flow * annual_factor
            dict_asset[ANNUAL_TOTAL_FLOW][VALUE] += total_flow * annual_factor

        if PEAK_FLOW not in dict_asset:
            dict_asset.update({PEAK_FLOW: {bus_name: max(flow), UNIT: "kW"}})
        else:
            dict_asset[PEAK_FLOW][bus_name] = max(flow)

        if AVERAGE_FLOW not in dict_asset:
            dict_asset.update({AVERAGE_FLOW: {bus_name: flow.mean(), UNIT: "kW"}})
        else:
            dict_asset[AVERAGE_FLOW][bus_name] = flow.mean()
×
844

845

846
def convert_demand_to_dataframe(dict_values, sector_demands=None):
    """Dataframe used for the demands table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    sector_demands: str
        Name of the sector of the energy system whose demands must be returned as a df by this function
        Default: None

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Work on a deep copy so that the MVS results dict is never mutated
    demands = copy.deepcopy(dict_values[ENERGY_CONSUMPTION])

    # Keep only the demands of the requested sector, if one was given
    if sector_demands is not None:
        non_sec_demands = [
            key
            for key, dem in demands.items()
            if dem[ENERGY_VECTOR] != sector_demands.title()
        ]
        for demand_to_drop in non_sec_demands:
            del demands[demand_to_drop]

    # Excess sinks and DSO feed-in sinks are not actual demands - drop them
    drop_list = [
        label for label in demands if EXCESS_SINK in label or DSO_FEEDIN in label
    ]
    for item in drop_list:
        del demands[item]

    # One table row (unit, vector, peak, mean, total) per remaining demand
    demand_data = {
        dem: [
            demands[dem][UNIT],
            demands[dem][ENERGY_VECTOR],
            demands[dem][TIMESERIES_PEAK][VALUE],
            demands[dem][TIMESERIES_AVERAGE][VALUE],
            demands[dem][TIMESERIES_TOTAL][VALUE],
        ]
        for dem in demands
    }

    # Assemble the dataframe displayed in the report
    df_dem = pd.DataFrame.from_dict(
        demand_data,
        orient="index",
        columns=[
            UNIT,
            "Type of Demand",
            "Peak Demand",
            "Mean Demand",
            "Total Annual Demand",
        ],
    )

    # Expose the demand names as a regular column and round for display
    df_dem.index.name = "Demands"
    df_dem = df_dem.reset_index()
    df_dem = df_dem.round(2)

    return df_dem
×
929

930

931
def convert_components_to_dataframe(dict_values):
    """Dataframe used for the component table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    Notes
    -----

    Tested with:
        - test_E1_process_results.test_convert_components_to_dataframe()
    """
    # Rows of the component table, keyed by asset label
    components = {}

    # energyProduction and energyConversion share the same tree structure,
    # so both groups can be processed by the same loop
    for group in (ENERGY_PRODUCTION, ENERGY_CONVERSION):
        for asset_name, asset in dict_values[group].items():
            # Define whether optimization takes place
            optimize = translate_optimizeCap_from_boolean_to_yes_no(
                asset[OPTIMIZE_CAP][VALUE]
            )
            components[asset_name] = [
                asset[OEMOF_ASSET_TYPE],
                asset[ENERGY_VECTOR],
                asset[UNIT],
                asset[INSTALLED_CAP][VALUE],
                optimize,
            ]

    # Energy storage assets have a different structure: one table row per
    # sub-component (input power, storage capacity, output power)
    for storage in dict_values[ENERGY_STORAGE].values():
        # Currently, the storage optimization setting applies to all sub-categories.
        # Can be re-used when storage asset sub-components can be optimized
        # individually: storage[sub_comp][OPTIMIZE_CAP][VALUE]
        optimize = translate_optimizeCap_from_boolean_to_yes_no(
            storage[OPTIMIZE_CAP][VALUE]
        )
        for sub_comp in (INPUT_POWER, STORAGE_CAPACITY, OUTPUT_POWER):
            sub = storage[sub_comp]
            components[sub[LABEL]] = [
                storage[OEMOF_ASSET_TYPE],
                storage[ENERGY_VECTOR],
                sub[INSTALLED_CAP][UNIT],
                sub[INSTALLED_CAP][VALUE],
                optimize,
            ]

    # Assemble the dataframe displayed in the report
    df_comp = pd.DataFrame.from_dict(
        components,
        orient="index",
        columns=[
            "Type of Component",
            "Energy Vector",
            UNIT,
            "Installed Capacity",
            "Capacity optimization",
        ],
    )
    df_comp.index.name = "Component"
    df_comp = df_comp.reset_index()
    return df_comp
1✔
1030

1031

1032
def translate_optimizeCap_from_boolean_to_yes_no(optimize_cap):
    r"""
    Translates the boolean OPTIMIZE_CAP to a yes-no value for readability of auto report

    Parameters
    ----------
    optimize_cap: bool
        Setting whether asset is optimized or not

    Returns
    -------
    optimize: str
        If OPTIMIZE_CAP==True: "Yes", else "No".

    Notes
    -----
    Tested with:
    - test_E1_process_results.test_translate_optimizeCap_from_boolean_to_yes_no()
    """
    # Identity check on purpose: only the explicit boolean True maps to "Yes";
    # any other (truthy) value is treated as "No"
    return "Yes" if optimize_cap is True else "No"
1✔
1056

1057

1058
def convert_scalar_matrix_to_dataframe(dict_values):
    """Dataframe used for the scalar matrix table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Start from the scalar matrix stored in the results, with a fresh 0..n index
    df_scalar_matrix = dict_values[KPI][KPI_SCALAR_MATRIX].reset_index()

    # The report table does not show the raw flow aggregates
    df_scalar_matrix = df_scalar_matrix.drop(
        columns=["index", TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW]
    )

    # Human-readable column headers
    df_scalar_matrix = df_scalar_matrix.rename(
        columns={
            LABEL: "Component/Parameter",
            OPTIMIZED_ADD_CAP: "CAP",
            ANNUAL_TOTAL_FLOW: "Aggregated Flow",
        }
    )

    # Round the numeric values to two decimals for display
    return df_scalar_matrix.round(2)
×
1095

1096

1097
def convert_cost_matrix_to_dataframe(dict_values):
    """Dataframe used for the cost matrix table of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Start from the cost matrix stored in the results, with a fresh 0..n index
    df_cost_matrix = dict_values[KPI][KPI_COST_MATRIX].reset_index()

    # The report table only shows total and upfront costs
    df_cost_matrix = df_cost_matrix.drop(
        columns=[
            "index",
            COST_OPERATIONAL_TOTAL,
            COST_INVESTMENT,
            COST_DISPATCH,
            COST_OM,
        ]
    )

    # Human-readable column headers
    df_cost_matrix = df_cost_matrix.rename(
        columns={
            LABEL: "Component",
            COST_TOTAL: "Total costs",
            COST_UPFRONT: "Upfront Investment Costs",
        }
    )

    # Round the numeric values to two decimals for display
    return df_cost_matrix.round(2)
×
1135

1136

1137
def convert_costs_to_dataframe(dict_values):
    """Dataframe used for the costs piecharts of the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    :class:`pandas.DataFrame<frame>`

    """
    # Cost matrix from the results JSON file as a pandas DataFrame,
    # restricted to the parameters needed for the pie charts
    df_pie_plot = dict_values[KPI][KPI_COST_MATRIX][
        [LABEL, ANNUITY_TOTAL, COST_INVESTMENT, COST_OPERATIONAL_TOTAL]
    ]

    # Append a summary row holding the sum of every column
    totals_row = df_pie_plot.sum().to_frame().T
    df_pie_plot = pd.concat([df_pie_plot, totals_row])

    # The first column is the label column: name the summary row "Total"
    df_pie_plot.iloc[-1, 0] = "Total"

    return df_pie_plot
×
1166

1167

1168
def convert_scalars_to_dataframe(dict_values):
    """
    Processes the scalar system-wide KPI so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_scalars_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_SCALARS_DICT does not hold any units, the table printed in the report is unit-les.
    """
    # Units for the cost-related KPI; "NA" for everything else
    units_cost_kpi = get_units_of_cost_matrix_entries(
        dict_values[ECONOMIC_DATA], dict_values[KPI][KPI_SCALARS_DICT]
    )

    # One row per KPI after transposing: index holds the KPI names
    kpi_scalars_dataframe = pd.DataFrame(
        dict_values[KPI][KPI_SCALARS_DICT], index=[VALUE]
    ).transpose()

    # Expose the KPI names and units as regular columns, ordered for display
    kpi_scalars_dataframe[KPI] = kpi_scalars_dataframe.index
    kpi_scalars_dataframe[UNIT] = units_cost_kpi
    kpi_scalars_dataframe = kpi_scalars_dataframe[[KPI, UNIT, VALUE]]

    return kpi_scalars_dataframe
×
1201

1202

1203
def convert_kpi_sector_to_dataframe(dict_values):
    """
    Processes the sector KPIs so that they can be included in the report

    Parameters
    ----------
    dict_values: dict
        output values of MVS

    Returns
    -------
    kpi_sectors_dataframe: :class:`pandas.DataFrame<frame>`
        Dataframe to be displayed as a table in the report

    Notes
    -----
    Currently, as the KPI_UNCOUPLED_DICT does not hold any units, the table printed in the report is unit-les.
    """
    kpi_uncoupled = dict_values[KPI][KPI_UNCOUPLED_DICT]

    # The sector KPI may still be a plain dict (not yet converted to a DataFrame)
    if isinstance(kpi_uncoupled, dict):
        kpi_sectors_dataframe = pd.DataFrame.from_dict(kpi_uncoupled, orient="index")
    else:
        kpi_sectors_dataframe = kpi_uncoupled

    # Move the KPI names out of the index into a leading "KPI" column
    original_columns = list(kpi_sectors_dataframe.columns)
    kpi_sectors_dataframe["KPI"] = kpi_sectors_dataframe.index
    kpi_sectors_dataframe = kpi_sectors_dataframe[["KPI"] + original_columns]

    return kpi_sectors_dataframe
×
1234

1235

1236
def get_units_of_cost_matrix_entries(dict_economic, kpi_list):
    """
    Determines the units of the costs KPI to be stored to :class: DataFrame.

    Parameters
    ----------
    dict_economic:
        Economic project data

    kpi_list:
        List of cost matrix entries

    Returns
    -------
    unit_list: list
        List of units for the :class: DataFrame to be created
    """
    # Currency of the project, read once from the economic data
    currency = dict_economic[CURR]

    # Mapping of cost KPI to their units; entries without a unit map to None
    kpi_cost_unit_dict = {
        LABEL: None,
        UNIT: None,
        COST_TOTAL: currency,
        COST_OPERATIONAL_TOTAL: currency,
        COST_INVESTMENT: currency,
        COST_UPFRONT: currency,
        COST_DISPATCH: currency,
        COST_OM: currency,
        ANNUITY_TOTAL: currency + "/" + UNIT_YEAR,
        ANNUITY_OM: currency + "/" + UNIT_YEAR,
        LCOE_ASSET: currency + "/" + "energy carrier unit",
    }

    # Unknown KPI get the placeholder "NA"; known ones keep their unit
    # (which may deliberately be None, e.g. for LABEL)
    return [kpi_cost_unit_dict.get(key, "NA") for key in kpi_list]
1✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc