oemof / oemof-solph / build 20316415839
17 Dec 2025 08:32PM UTC, coverage: 81.978% (+0.7%) from 81.235%
Pull Request #1226: Examples timeseries retcon paper
(merge 27045bb72 into 0e48bc68b, committed via GitHub web-flow)

955 of 1268 branches covered (75.32%); branch coverage is included in the aggregate %.
2784 of 3293 relevant lines covered (84.54%), 0.85 hits per line.

Source file: /src/oemof/solph/processing.py (87.35% covered)
# -*- coding: utf-8 -*-

"""Modules for providing a convenient data structure for solph results.

Information about the possible usage is provided within the examples.

SPDX-FileCopyrightText: Uwe Krien <krien@uni-bremen.de>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Stephan Günther
SPDX-FileCopyrightText: henhuy
SPDX-FileCopyrightText: Johannes Kochems
SPDX-FileCopyrightText: Patrik Schönfeldt <patrik.schoenfeldt@dlr.de>

SPDX-License-Identifier: MIT

"""
import itertools
import numbers
import operator
import sys
from collections import abc
from itertools import groupby
from typing import Dict
from typing import Tuple

import numpy as np
import pandas as pd
from oemof.network.network import Entity
from pyomo.core.base.piecewise import IndexedPiecewise
from pyomo.core.base.var import Var

from oemof.solph.components._generic_storage import GenericStorage

from ._plumbing import _FakeSequence
from .helpers import flatten

PERIOD_INDEXES = ("invest", "total", "old", "old_end", "old_exo")

def get_tuple(x):
    """Get oemof tuple within iterable or create it

    Tuples from Pyomo are of type `(n, n, int)`, `(n, n)` and `(n, int)`.
    For single nodes `n` a tuple with one object `(n,)` is created.
    """
    for i in x:
        if isinstance(i, tuple):
            return i
        elif issubclass(type(i), Entity):
            return (i,)

    # for standalone variables, x is used as identifying tuple
    if isinstance(x, tuple):
        return x

def get_timestep(x):
    """Get the timestep from oemof tuples

    The timestep from tuples `(n, n, int)`, `(n, n)`, `(n, int)` and `(n,)`
    is fetched as the last element. For time-independent data (scalars),
    zero is returned.
    """
    if all(issubclass(type(n), Entity) for n in x):
        return 0
    else:
        return x[-1]

def remove_timestep(x):
    """Remove the timestep from oemof tuples

    The timestep is removed from tuples of type `(n, n, int)` and `(n, int)`.
    """
    if all(issubclass(type(n), Entity) for n in x):
        return x
    else:
        return x[:-1]

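# A minimal sketch of how the three helpers above interact (`n1` and `n2`
# stand for hypothetical Entity instances; block and variable names are
# illustrative):
#
#     get_tuple(("FlowBlock", "flow", (n1, n2, 5)))  # -> (n1, n2, 5)
#     get_timestep((n1, n2, 5))                      # -> 5
#     remove_timestep((n1, n2, 5))                   # -> (n1, n2)
#     get_timestep((n1, n2))                         # -> 0 (scalar data)
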
def create_dataframe(om):
    """Create a result DataFrame with all optimization data

    Results from Pyomo are written into one common pandas.DataFrame where
    separate columns are created for the variable index, e.g. for tuples
    of the flows and components or the timesteps.
    """
    # get all pyomo variables including their block
    block_vars = list(
        set([bv.parent_component() for bv in om.component_data_objects(Var)])
    )
    var_dict = {}
    for bv in block_vars:
        # Drop the auxiliary variables introduced by pyomo's Piecewise
        parent_component = bv.parent_block().parent_component()
        if not isinstance(parent_component, IndexedPiecewise):
            try:
                idx_set = getattr(bv, "_index_set")
            except AttributeError:
                # To make it compatible with Pyomo < 6.4.1
                idx_set = getattr(bv, "_index")

            for i in idx_set:
                key = (str(bv).split(".")[0], str(bv).split(".")[-1], i)
                value = bv[i].value
                var_dict[key] = value

    # use this to create a pandas dataframe
    df = pd.DataFrame(list(var_dict.items()), columns=["pyomo_tuple", "value"])
    df["variable_name"] = df["pyomo_tuple"].str[1]

    # adapt the dataframe by separating tuple data into columns depending
    # on which dimension the variable/parameter has (scalar/sequence).
    # columns for the oemof tuple and timestep are created
    df["oemof_tuple"] = df["pyomo_tuple"].map(get_tuple)
    df = df[df["oemof_tuple"].map(lambda x: x is not None)]
    df["timestep"] = df["oemof_tuple"].map(get_timestep)
    df["oemof_tuple"] = df["oemof_tuple"].map(remove_timestep)

    # Use a second call of remove_timestep to strip the period index,
    # which is not needed for flows
    df.loc[df["variable_name"] == "flow", "oemof_tuple"] = df.loc[
        df["variable_name"] == "flow", "oemof_tuple"
    ].map(remove_timestep)

    # order the data by oemof tuple and timestep
    df = df.sort_values(["oemof_tuple", "timestep"], ascending=[True, True])

    # drop empty decision variables
    df = df.dropna(subset=["value"])

    return df

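# A sketch of the frame returned by create_dataframe (node labels and
# values are illustrative): one row per variable value, with the helper
# columns added above.
#
#     pyomo_tuple                        value  variable_name  oemof_tuple  timestep
#     (FlowBlock, flow, (n1, n2, 0))      42.0  flow           (n1, n2)     0
#     (FlowBlock, flow, (n1, n2, 1))      43.5  flow           (n1, n2)     1
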
def divide_scalars_sequences(df_dict, k):
    """Split results into scalars and sequences results

    Parameters
    ----------
    df_dict: dict
        dict of pd.DataFrames, keyed by oemof tuples
    k: tuple
        oemof tuple for results processing
    """
    try:
        condition = df_dict[k][:-1].isnull().any()
        scalars = df_dict[k].loc[:, condition].dropna().iloc[0]
        sequences = df_dict[k].loc[:, ~condition]
        return {"scalars": scalars, "sequences": sequences}
    except IndexError:
        error_message = (
            "Cannot access index on result data. "
            "Did the optimization terminate without errors?"
        )
        raise IndexError(error_message)

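# Sketch of the split above (values illustrative): a column that is NaN in
# all but its leading rows (e.g. "invest") is treated as a scalar, while
# fully populated columns (e.g. "flow") stay in the sequences DataFrame.
#
#     timestep   flow  invest
#     0          42.0   100.0     -> scalars:   pd.Series({"invest": 100.0})
#     1          43.5     NaN     -> sequences: DataFrame with "flow" column
#     2           NaN     NaN     (padding row, see results())
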
def set_result_index(df_dict, k, result_index):
    """Define index for results

    Parameters
    ----------
    df_dict: dict
        dict of pd.DataFrames, keyed by oemof tuples
    k: tuple
        oemof tuple for results processing
    result_index: pd.Index
        Index to use for results
    """
    try:
        df_dict[k].index = result_index
    except ValueError:
        try:
            df_dict[k] = df_dict[k][:-1]
            df_dict[k].index = result_index
        except ValueError as e:
            msg = (
                "\nFlow: {0}-{1}. This could be caused by NaN-values "
                "in your input data."
            )
            raise type(e)(
                str(e) + msg.format(k[0].label, k[1].label)
            ).with_traceback(sys.exc_info()[2])

def set_sequences_index(df, result_index):
    """Set the time index on a sequences DataFrame, dropping the last row
    if the index is one entry shorter than the data."""
    try:
        df.index = result_index
    except ValueError:
        try:
            df = df[:-1]
            df.index = result_index
        except ValueError:
            raise ValueError("Results extraction failed!")

def results(model, remove_last_time_point=False):
    """Create a nested result dictionary from the result DataFrame

    The already rearranged results from Pyomo from the result DataFrame are
    transferred into a nested dictionary of pandas objects.
    The first level key of that dictionary is a node (denoting the respective
    flow or component).

    The second level keys are "sequences" and "scalars" for a *standard model*:

    * A pd.DataFrame holds all results that are time-dependent, i.e. given as
      a sequence and can be indexed with the energy system's timeindex.
    * A pd.Series holds all scalar values which are applicable for timestep 0
      (i.e. investments).

    For a *multi-period model*, the second level key for "sequences" remains
    the same, while instead of "scalars", the key "period_scalars" is used:

    * For sequences, see standard model.
    * Instead of a pd.Series, a pd.DataFrame holds scalar values indexed
      by periods. These hold investment-related variables.

    Examples
    --------
    * *Standard model*: `results[idx]['scalars']`
      and flows `results[n, n]['sequences']`.
    * *Multi-period model*: `results[idx]['period_scalars']`
      and flows `results[n, n]['sequences']`.

    Parameters
    ----------
    model : oemof.solph.Model
        A solved oemof.solph model.
    remove_last_time_point : bool
        The last time point of all TIMEPOINT variables is removed to get the
        same length as the TIMESTEP (interval) variables without getting
        nan-values. By default, the last time point is removed if it has not
        been defined by the user in the EnergySystem but inferred. If all
        time points have been defined explicitly by the user, the last time
        point will not be removed by default. In that case, all interval
        variables will get one row with nan-values to have the same index
        for all variables.
    """
    # Extraction steps that are the same for both model types
    df = create_dataframe(model)

    # create a dict of dataframes keyed by oemof tuples
    df_dict = {
        k if len(k) > 1 else (k[0], None): v[
            ["timestep", "variable_name", "value"]
        ]
        for k, v in df.groupby("oemof_tuple")
    }

    # Define index
    if model.es.tsa_parameters:
        for p, period_data in enumerate(model.es.tsa_parameters):
            if p == 0:
                if model.es.periods is None:
                    timeindex = model.es.timeindex
                else:
                    timeindex = model.es.periods[0]
                result_index = _disaggregate_tsa_timeindex(
                    timeindex, period_data
                )
            else:
                result_index = result_index.union(
                    _disaggregate_tsa_timeindex(
                        model.es.periods[p], period_data
                    )
                )
    else:
        if model.es.timeindex is None:
            result_index = list(range(len(model.es.timeincrement) + 1))
        else:
            result_index = model.es.timeindex

    if model.es.tsa_parameters is not None:
        df_dict = _disaggregate_tsa_result(df_dict, model.es.tsa_parameters)

    # create final result dictionary by splitting up the dataframes in the
    # dataframe dict into a series for scalar data and dataframe for sequences
    result = {}

    # Standard model results extraction
    if model.es.periods is None:
        result = _extract_standard_model_result(
            df_dict, result, result_index, remove_last_time_point
        )
        scalars_col = "scalars"

    # Results extraction for a multi-period model
    else:
        period_indexed = ["invest", "total", "old", "old_end", "old_exo"]

        result = _extract_multi_period_model_result(
            model,
            df_dict,
            period_indexed,
            result,
            result_index,
            remove_last_time_point,
        )
        scalars_col = "period_scalars"

    # add dual variables for bus constraints
    if model.dual is not None:
        grouped = groupby(
            sorted(model.BusBlock.balance.iterkeys()), lambda t: t[0]
        )
        for bus, timestep in grouped:
            duals = [
                model.dual[model.BusBlock.balance[bus, t]] for _, t in timestep
            ]
            if model.es.periods is None:
                df = pd.DataFrame({"duals": duals}, index=result_index[:-1])
            # TODO: Align with standard model
            else:
                df = pd.DataFrame({"duals": duals}, index=result_index)
            if (bus, None) not in result.keys():
                result[(bus, None)] = {
                    "sequences": df,
                    scalars_col: pd.Series(dtype=float),
                }
            else:
                result[(bus, None)]["sequences"]["duals"] = duals

    return result

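# A minimal usage sketch (hypothetical energy system and nodes; the solver
# choice is an assumption):
#
#     import oemof.solph as solph
#     es = solph.EnergySystem(timeindex=my_index)
#     ...  # add buses, sources, sinks, flows
#     model = solph.Model(es)
#     model.solve(solver="cbc")
#     my_results = solph.processing.results(model)
#     flow_series = my_results[(source, bus)]["sequences"]["flow"]
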
def _extract_standard_model_result(
    df_dict, result, result_index, remove_last_time_point
):
    """Extract and return the results of a standard model

    * Optionally remove the last time point or include it otherwise.
    * Set index to timeindex and pivot results such that values are displayed
      for the respective variables. Reindex with the energy system's timeindex.
    * Filter for columns with nan values to retrieve scalar variables. Split
      up the DataFrame into sequences and scalars and return it.

    Parameters
    ----------
    df_dict : dict
        dictionary of results DataFrames
    result : dict
        dictionary to store the results
    result_index : pd.DatetimeIndex
        timeindex to use for the results (derived from EnergySystem)
    remove_last_time_point : bool
        if True, remove the last time point

    Returns
    -------
    result : dict
        dictionary with results stored
    """
    if remove_last_time_point:
        # The values of intervals belong to the time at the beginning of the
        # interval.
        for k in df_dict:
            df_dict[k].set_index("timestep", inplace=True)
            df_dict[k] = df_dict[k].pivot(
                columns="variable_name", values="value"
            )
            set_result_index(df_dict, k, result_index[:-1])
            result[k] = divide_scalars_sequences(df_dict, k)
    else:
        for k in df_dict:
            df_dict[k].set_index("timestep", inplace=True)
            df_dict[k] = df_dict[k].pivot(
                columns="variable_name", values="value"
            )
            # Add an empty row with nan at the end of the table by adding 1
            # to the last value of the numeric index.
            df_dict[k].loc[df_dict[k].index[-1] + 1, :] = np.nan
            set_result_index(df_dict, k, result_index)
            result[k] = divide_scalars_sequences(df_dict, k)

    return result

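# Sketch of the pivot step used in both extraction paths (values
# illustrative): long-format rows become one column per variable name.
#
#     timestep  variable_name  value            flow  invest
#     0         flow            42.0    ->   0  42.0   100.0
#     0         invest         100.0         1  43.5     NaN
#     1         flow            43.5
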
def _extract_multi_period_model_result(
    model,
    df_dict,
    period_indexed=None,
    result=None,
    result_index=None,
    remove_last_time_point=False,
):
    """Extract and return the results of a multi-period model

    The difference to the standard model lies in the way scalar values are
    extracted, since they now depend on periods.

    Parameters
    ----------
    model : oemof.solph.models.Model
        The optimization model
    df_dict : dict
        dictionary of results DataFrames
    period_indexed : list
        list of variables that are indexed by periods
    result : dict
        dictionary to store the results
    result_index : pd.DatetimeIndex
        timeindex to use for the results (derived from EnergySystem)
    remove_last_time_point : bool
        if True, remove the last time point

    Returns
    -------
    result : dict
        dictionary with results stored
    """
    for k in df_dict:
        df_dict[k].set_index("timestep", inplace=True)
        df_dict[k] = df_dict[k].pivot(columns="variable_name", values="value")
        # Split data set
        period_cols = [
            col for col in df_dict[k].columns if col in period_indexed
        ]
        # map periods to their start years for displaying period results
        d = {
            key: val + model.es.periods[0].min().year
            for key, val in enumerate(model.es.periods_years)
        }
        period_scalars = df_dict[k].loc[:, period_cols].dropna()
        sequences = df_dict[k].loc[
            :, [col for col in df_dict[k].columns if col not in period_cols]
        ]
        if remove_last_time_point:
            set_sequences_index(sequences, result_index[:-1])
        else:
            set_sequences_index(sequences, result_index)
        if period_scalars.empty:
            period_scalars = pd.DataFrame(index=d.values())
        try:
            period_scalars.rename(index=d, inplace=True)
            period_scalars.index.name = "period"
            result[k] = {
                "period_scalars": period_scalars,
                "sequences": sequences,
            }
        except IndexError:
            error_message = (
                "Some indices seem to be not matching.\n"
                "Cannot properly extract model results."
            )
            raise IndexError(error_message)

    return result

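# Sketch of the period-to-year mapping built inside the loop above
# (assuming the first period starts in 2020 and
# model.es.periods_years == [0, 5, 10]):
#
#     d == {0: 2020, 1: 2025, 2: 2030}
#
# so rows of "period_scalars" are labelled by period start years.
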
def _disaggregate_tsa_result(df_dict, tsa_parameters):
    """
    Disaggregate timeseries aggregated by TSAM

    All component flows are disaggregated using the mapping order of original
    and typical clusters in TSAM parameters. Additionally, storage SOC is
    disaggregated from inter and intra storage contents.

    Multi-period indexes are removed from results up front and added again
    after disaggregation.

    Parameters
    ----------
    df_dict : dict
        Raw results from oemof model
    tsa_parameters : list-of-dicts
        TSAM parameters holding order, occurrences and timesteps_per_period
        for each period

    Returns
    -------
    dict: Disaggregated sequences
    """
    periodic_dict = {}
    flow_dict = {}
    for key, data in df_dict.items():
        periodic_values = data[data["variable_name"].isin(PERIOD_INDEXES)]
        if not periodic_values.empty:
            periodic_dict[key] = periodic_values
        flow_dict[key] = data[~data["variable_name"].isin(PERIOD_INDEXES)]

    # Find storages and remove related entries from flow dict:
    storages, storage_keys = _get_storage_soc_flows_and_keys(flow_dict)
    for key in storage_keys:
        del flow_dict[key]

    # Find multiplexer and remove related entries from flow dict:
    multiplexer, multiplexer_keys = _get_multiplexer_flows_and_keys(flow_dict)
    for key in multiplexer_keys:
        del flow_dict[key]

    # Disaggregate flows
    for flow in flow_dict:
        disaggregated_flow_frames = []
        period_offset = 0
        for tsa_period in tsa_parameters:
            for k in tsa_period["order"]:
                flow_k = flow_dict[flow].iloc[
                    period_offset
                    + k * tsa_period["timesteps"] : period_offset
                    + (k + 1) * tsa_period["timesteps"]
                ]
                # Disaggregate segmentation
                if "segments" in tsa_period:
                    flow_k = _disaggregate_segmentation(
                        flow_k, tsa_period["segments"], k
                    )
                disaggregated_flow_frames.append(flow_k)
            period_offset += tsa_period["timesteps"] * len(
                tsa_period["occurrences"]
            )
        ts = pd.concat(disaggregated_flow_frames)
        ts.timestep = range(len(ts))
        # Have to set and reset the index, as interpolation in pandas<2.1.0
        # cannot handle NaNs in the index
        ts = ts.set_index("timestep")
        flow_dict[flow] = ts.ffill().reset_index("timestep")

    # Add storage SOC flows:
    for storage, soc in storages.items():
        flow_dict[(storage, None)] = _calculate_soc_from_inter_and_intra_soc(
            soc, storage, tsa_parameters
        )
    # Add multiplexer boolean "active" values:
    for mux, values in multiplexer.items():
        flow_dict[(mux, None)] = _calculate_multiplexer_actives(
            values, mux, tsa_parameters
        )
    # Add periodic values (they get extracted in the period extraction
    # function)
    for key, data in periodic_dict.items():
        flow_dict[key] = pd.concat([flow_dict[key], data])

    return flow_dict

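# Sketch of the order-based flow disaggregation above (illustrative
# numbers): with tsa_period["timesteps"] == 2 and
# tsa_period["order"] == [1, 0, 1], the aggregated rows
# [c0_t0, c0_t1, c1_t0, c1_t1] (two typical clusters c0 and c1) are
# expanded to [c1_t0, c1_t1, c0_t0, c0_t1, c1_t0, c1_t1].
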
def _disaggregate_segmentation(
    df: pd.DataFrame,
    segments: Dict[Tuple[int, int], int],
    current_period: int,
) -> pd.DataFrame:
    """Disaggregate segmentation

    For each segment, values are reindexed by segment length, holding None
    values which are interpolated in a later step (as storages need linear
    interpolation while flows need padded interpolation).

    Parameters
    ----------
    df : pd.Dataframe
        holding values for each segment
    segments : Dict[Tuple[int, int], int]
        Segmentation dict from TSAM, holding segmentation length for each
        timestep in each typical period
    current_period: int
        Typical period the data belongs to, needed to extract related segments

    Returns
    -------
    pd.Dataframe
        holding values for each timestep instead of each segment.
        Added timesteps contain None values and are interpolated later.
    """
    current_segments = list(
        v for ((k, s), v) in segments.items() if k == current_period
    )
    df.index = range(len(current_segments))
    segmented_index = itertools.chain.from_iterable(
        [i] + list(itertools.repeat(None, s - 1))
        for i, s in enumerate(current_segments)
    )
    disaggregated_data = df.reindex(segmented_index)
    return disaggregated_data

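# Sketch of the index expansion above: for segment lengths [3, 1] the
# generator yields the index [0, None, None, 1], so reindexing turns two
# segment rows into four timestep rows; the None-indexed rows hold NaN and
# are filled later (padding for flows, linear interpolation for SOC).
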
def _calculate_soc_from_inter_and_intra_soc(soc, storage, tsa_parameters):
    """Calculate resulting SOC from inter and intra SOC flows"""
    soc_frames = []
    i_offset = 0
    t_offset = 0
    for p, tsa_period in enumerate(tsa_parameters):
        for i, k in enumerate(tsa_period["order"]):
            inter_value = soc["inter"].iloc[i_offset + i]["value"]
            # Self-discharge has to be taken into account for calculating
            # inter SOC for each timestep in cluster
            t0 = t_offset + i * tsa_period["timesteps"]
            # Add last timesteps of simulation in order to interpolate SOC for
            # last segment correctly:
            is_last_timestep = (
                p == len(tsa_parameters) - 1
                and i == len(tsa_period["order"]) - 1
            )
            timesteps = (
                tsa_period["timesteps"] + 1
                if is_last_timestep
                else tsa_period["timesteps"]
            )
            inter_series = (
                pd.Series(
                    itertools.accumulate(
                        (
                            (
                                (1 - storage.loss_rate[t])
                                ** tsa_period["segments"][(k, t - t0)]
                                if "segments" in tsa_period
                                else 1 - storage.loss_rate[t]
                            )
                            for t in range(
                                t0,
                                t0 + timesteps - 1,
                            )
                        ),
                        operator.mul,
                        initial=1,
                    )
                )
                * inter_value
            )
            intra_series = soc["intra"][(p, k)].iloc[0:timesteps]
            soc_frame = pd.DataFrame(
                intra_series["value"].values
                + inter_series.values,  # use raw values; aligning the two
                # indexes would produce NaN entries
                columns=["value"],
            )

            # Disaggregate segmentation
            if "segments" in tsa_period:
                soc_disaggregated = _disaggregate_segmentation(
                    soc_frame[:-1] if is_last_timestep else soc_frame,
                    tsa_period["segments"],
                    k,
                )
                if is_last_timestep:
                    soc_disaggregated.loc[len(soc_disaggregated)] = (
                        soc_frame.iloc[-1]
                    )
                soc_frame = soc_disaggregated

            soc_frames.append(soc_frame)
        i_offset += len(tsa_period["order"])
        t_offset += i_offset * tsa_period["timesteps"]
    soc_ts = pd.concat(soc_frames)

    soc_ts["variable_name"] = "soc"
    soc_ts["timestep"] = range(len(soc_ts))

    # Disaggregate segments by linear interpolation and remove the
    # last timestep afterwards (only needed for interpolation).
    # Note: Interpolate on object dtype is deprecated.
    # We probably won't fix this before fully moving to the Results object.
    interpolated_soc = soc_ts.interpolate()
    return interpolated_soc.iloc[:-1]

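# Sketch of the inter-SOC decay series built above (illustrative: no
# segmentation, a constant loss_rate of 0.01, three timesteps per
# cluster): itertools.accumulate yields [1, 0.99, 0.9801], which,
# multiplied by the inter-period SOC, gives the self-discharged baseline
# that the intra-period SOC deviations are added onto.
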
def _calculate_multiplexer_actives(values, multiplexer, tsa_parameters):
    """Calculate multiplexer actives"""
    actives_frames = []
    for p, tsa_period in enumerate(tsa_parameters):
        for i, k in enumerate(tsa_period["order"]):
            timesteps = tsa_period["timesteps"]
            actives_frames.append(
                pd.DataFrame(
                    values[(p, k)].iloc[0:timesteps], columns=["value"]
                )
            )
    actives_frames_ts = pd.concat(actives_frames)
    actives_frames_ts["variable_name"] = values[(p, k)][
        "variable_name"
    ].values[0]
    actives_frames_ts["timestep"] = range(len(actives_frames_ts))
    return actives_frames_ts

def _get_storage_soc_flows_and_keys(flow_dict):
    """Detect storage flows in flow dict"""
    storages = {}
    storage_keys = []
    for oemof_tuple, data in flow_dict.items():
        if not isinstance(oemof_tuple[0], GenericStorage):
            continue  # Skip components other than Storage
        if oemof_tuple[1] is not None and not isinstance(oemof_tuple[1], int):
            continue  # Skip storage output flows

        # Here we have either inter or intra storage index,
        # depending on oemof tuple length
        storage_keys.append(oemof_tuple)
        if oemof_tuple[0] not in storages:
            storages[oemof_tuple[0]] = {"inter": 0, "intra": {}}
        if len(oemof_tuple) == 2:
            # Must be filtered for variable name "inter_storage_content",
            # otherwise "init_content" variable (in non-multi-period approach)
            # interferes with SOC results
            storages[oemof_tuple[0]]["inter"] = data[
                data["variable_name"] == "inter_storage_content"
            ]
        if len(oemof_tuple) == 3:
            storages[oemof_tuple[0]]["intra"][
                (oemof_tuple[1], oemof_tuple[2])
            ] = data
    return storages, storage_keys

def _get_multiplexer_flows_and_keys(flow_dict):
    """Detect multiplexer flows in flow dict"""
    multiplexer = {}
    multiplexer_keys = []
    for oemof_tuple, data in flow_dict.items():
        if oemof_tuple[1] is not None and not isinstance(oemof_tuple[1], int):
            continue
        if "multiplexer_active" in data["variable_name"].values[0]:
            multiplexer.setdefault(oemof_tuple[0], {})
            multiplexer_keys.append(oemof_tuple)
            multiplexer[oemof_tuple[0]][
                (oemof_tuple[1], oemof_tuple[2])
            ] = data
    return multiplexer, multiplexer_keys

def _disaggregate_tsa_timeindex(period_index, tsa_parameters):
    """Disaggregate aggregated period timeindex by using TSA parameters"""
    return pd.date_range(
        start=period_index[0],
        periods=tsa_parameters["timesteps_per_period"]
        * len(tsa_parameters["order"]),
        freq=period_index.freq,
    )

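# Sketch (illustrative values): for an hourly period index starting at
# 2020-01-01 00:00, timesteps_per_period == 24 and an order of length 3,
# this returns 72 hourly time stamps, i.e. the unaggregated horizon of
# the period.
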
def convert_keys_to_strings(result, keep_none_type=False):
    """
    Convert the dictionary keys to strings.

    All (tuple) keys of the result object, e.g. results[(pp1, bus1)], are
    converted into strings that represent the object labels,
    e.g. results[('pp1','bus1')].
    """
    if keep_none_type:
        converted = {
            (
                tuple([str(e) if e is not None else None for e in k])
                if isinstance(k, tuple)
                else str(k) if k is not None else None
            ): v
            for k, v in result.items()
        }
    else:
        converted = {
            tuple(map(str, k)) if isinstance(k, tuple) else str(k): v
            for k, v in result.items()
        }
    return converted

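# Sketch (hypothetical nodes `pp1` and `bus1` carrying those labels):
#
#     string_results = convert_keys_to_strings(my_results)
#     string_results[("pp1", "bus1")]["sequences"]  # same data, string keys
#
# With keep_none_type=True, node keys such as (pp1, None) stay
# ("pp1", None) instead of becoming ("pp1", "None").
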
def meta_results(om, undefined=False):
    """
    Fetch some metadata from the Solver. Feel free to add more keys.

    Valid keys of the resulting dictionary are: 'objective', 'problem',
    'solver'.

    Parameters
    ----------
    om : oemof.solph.Model
        A solved Model.
    undefined : bool
        By default (False) only defined keys can be found in the dictionary.
        Set to True to get also the undefined keys.

    Returns
    -------
    dict
    """
    meta_res = {"objective": om.objective()}

    for k1 in ["Problem", "Solver"]:
        k1 = k1.lower()
        meta_res[k1] = {}
        for k2, v2 in om.es.results[k1][0].items():
            try:
                if str(om.es.results[k1][0][k2]) == "<undefined>":
                    if undefined:
                        meta_res[k1][k2] = str(om.es.results[k1][0][k2])
                else:
                    meta_res[k1][k2] = om.es.results[k1][0][k2]
            except TypeError:
                if undefined:
                    msg = "Cannot fetch meta results of type {0}"
                    meta_res[k1][k2] = msg.format(
                        type(om.es.results[k1][0][k2])
                    )

    return meta_res

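# Sketch of the returned structure (the objective value is illustrative;
# the available sub-keys come from Pyomo's solver results object and
# depend on the solver used):
#
#     {"objective": 4042.5, "problem": {...}, "solver": {...}}
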
def __separate_attrs(
    system, exclude_attrs, get_flows=False, exclude_none=True
):
    """
    Create a dictionary with flow scalars and series.

    The dictionary is structured with flows as tuples and nested dictionaries
    holding the scalars and series, e.g.
    {(node1, node2): {'scalars': {'attr1': scalar, 'attr2': 'text'},
    'sequences': {'attr1': iterable, 'attr2': iterable}}}

    Parameters
    ----------
    system:
        A solved oemof.solph.Model or oemof.solph.EnergySystem
    exclude_attrs: List[str]
        List of additional attributes which shall be excluded from
        the parameter dict
    get_flows: bool
        Whether to include flow values or not
    exclude_none: bool
        If set, scalars and sequences containing None values are excluded

    Returns
    -------
    dict
    """

    def detect_scalars_and_sequences(com):
        scalars = {}
        sequences = {}

        default_exclusions = [
            "__",
            "_",
            "registry",
            "inputs",
            "outputs",
            "Label",
            "input",
            "output",
            "constraint_group",
        ]
        # Must be tuple in order to work with `str.startswith()`:
        exclusions = tuple(default_exclusions + exclude_attrs)
        attrs = [
            i
            for i in dir(com)
            if not (i.startswith(exclusions) or callable(getattr(com, i)))
        ]

        for a in attrs:
            attr_value = getattr(com, a)

            # Iterate through investment and add scalars and sequences with
            # "investment" prefix to component data:
            if attr_value.__class__.__name__ == "Investment":
                invest_data = detect_scalars_and_sequences(attr_value)
                scalars.update(
                    {
                        "investment_" + str(k): v
                        for k, v in invest_data["scalars"].items()
                    }
                )
                sequences.update(
                    {
                        "investment_" + str(k): v
                        for k, v in invest_data["sequences"].items()
                    }
                )
                continue

            if isinstance(attr_value, str):
                scalars[a] = attr_value
                continue

            # If the label is a tuple it is iterable, therefore it should be
            # converted to a string. Otherwise, it will be a sequence.
            if a == "label":
                attr_value = str(attr_value)

            if isinstance(attr_value, abc.Iterable):
                sequences[a] = attr_value
            elif isinstance(attr_value, _FakeSequence):
                scalars[a] = attr_value.value
            else:
                scalars[a] = attr_value

        sequences = flatten(sequences)

        com_data = {
            "scalars": scalars,
            "sequences": sequences,
        }
        move_undetected_scalars(com_data)
        if exclude_none:
            remove_nones(com_data)

        com_data = {
            "scalars": pd.Series(com_data["scalars"]),
            "sequences": pd.DataFrame(com_data["sequences"]),
        }
        return com_data

    def move_undetected_scalars(com):
        for ckey, value in list(com["sequences"].items()):
            if isinstance(value, (str, numbers.Number)):
                com["scalars"][ckey] = value
                del com["sequences"][ckey]
            elif isinstance(value, _FakeSequence):
                com["scalars"][ckey] = value.value
                del com["sequences"][ckey]
            elif len(value) == 0:
                del com["sequences"][ckey]

    def remove_nones(com):
        for ckey, value in list(com["scalars"].items()):
            if value is None:
                del com["scalars"][ckey]
        for ckey, value in list(com["sequences"].items()):
            if len(value) == 0 or value[0] is None:
                del com["sequences"][ckey]

    # Check if system is es or om:
    if system.__class__.__name__ == "EnergySystem":
        components = system.flows() if get_flows else system.nodes
    else:
        components = system.flows if get_flows else system.es.nodes

    data = {}
    for com_key in components:
        component = components[com_key] if get_flows else com_key
        component_data = detect_scalars_and_sequences(component)
        comkey = com_key if get_flows else (com_key, None)
        data[comkey] = component_data
    return data

def parameter_as_dict(system, exclude_none=True, exclude_attrs=None):
    """
    Create a result dictionary containing node parameters.

    Results are written into a dictionary of pandas objects where
    a Series holds all scalar values and a DataFrame all sequences for nodes
    and flows.
    The dictionary is keyed by flows (n, n) and nodes (n, None), e.g.
    `parameter[(n, n)]['sequences']` or `parameter[(n, n)]['scalars']`.

    Parameters
    ----------
    system: energy_system.EnergySystem
        A populated energy system.
    exclude_none: bool
        If True, all scalars and sequences containing None values are excluded
    exclude_attrs: Optional[List[str]]
        Optional list of additional attributes which shall be excluded from
        the parameter dict

    Returns
    -------
    dict: Parameters for all nodes and flows
    """

    if exclude_attrs is None:
        exclude_attrs = []

    flow_data = __separate_attrs(
        system, exclude_attrs, get_flows=True, exclude_none=exclude_none
    )
    node_data = __separate_attrs(
        system, exclude_attrs, get_flows=False, exclude_none=exclude_none
    )

    flow_data.update(node_data)
    return flow_data
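
# A minimal usage sketch (hypothetical energy system `es` with a flow
# from `source` to `bus`; attribute names are examples):
#
#     params = parameter_as_dict(es)
#     params[(source, bus)]["scalars"]    # flow attributes, e.g. variable_costs
#     params[(source, bus)]["sequences"]  # time series attributes, e.g. fix
#     params[(bus, None)]["scalars"]      # node-level attributes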