# /premise/fuels.py
"""
Integrates projections regarding fuel production and supply.
"""

import copy
import logging.config
from functools import lru_cache
from pathlib import Path
from typing import Union

import wurst
import xarray as xr
import yaml
from numpy import ndarray

from . import VARIABLES_DIR
from .inventory_imports import get_biosphere_code
from .logger import create_logger
from .transformation import (
    Any,
    BaseTransformation,
    Dict,
    IAMDataCollection,
    InventorySet,
    List,
    Tuple,
    get_shares_from_production_volume,
    get_suppliers_of_a_region,
    np,
    uuid,
    ws,
)
from .utils import DATA_DIR, get_crops_properties

logger = create_logger("fuel")

REGION_CLIMATE_MAP = VARIABLES_DIR / "iam_region_to_climate.yaml"
FUEL_LABELS = DATA_DIR / "fuels" / "fuel_labels.csv"
SUPPLY_CHAIN_SCENARIOS = DATA_DIR / "fuels" / "supply_chain_scenarios.yml"
HEAT_SOURCES = DATA_DIR / "fuels" / "heat_sources_map.yml"
HYDROGEN_SOURCES = DATA_DIR / "fuels" / "hydrogen_activities.yml"
HYDROGEN_SUPPLY_LOSSES = DATA_DIR / "fuels" / "hydrogen_supply_losses.yml"
METHANE_SOURCES = DATA_DIR / "fuels" / "methane_activities.yml"
LIQUID_FUEL_SOURCES = DATA_DIR / "fuels" / "liquid_fuel_activities.yml"
FUEL_MARKETS = DATA_DIR / "fuels" / "fuel_markets.yml"
BIOFUEL_SOURCES = DATA_DIR / "fuels" / "biofuels_activities.yml"
FUEL_GROUPS = DATA_DIR / "fuels" / "fuel_groups.yaml"


def fetch_mapping(filepath: str) -> dict:
    """Returns a dictionary from a YML file"""

    with open(filepath, "r", encoding="utf-8") as stream:
        mapping = yaml.safe_load(stream)
    return mapping


@lru_cache()
def get_compression_effort(
    inlet_pressure: int, outlet_pressure: int, flow_rate: int
) -> float:
    """
    Calculate the required electricity consumption from the compressor given
    an inlet and outlet pressure and a flow rate for hydrogen.

    :param inlet_pressure: the input pressure in bar
    :param outlet_pressure: the output pressure in bar
    :param flow_rate: the flow rate of hydrogen in kg/day
    :return: the required electricity consumption in kWh

    """
    # Constants
    COMPRESSIBILITY_FACTOR = 1.03198
    NUM_COMPRESSOR_STAGES = 2
    INLET_TEMPERATURE = 310.95  # K
    RATIO_SPECIFIC_HEATS = 1.4
    MOLECULAR_MASS_H2 = 2.15  # g/mol
    COMPRESSOR_EFFICIENCY = 0.75

    # Intermediate calculations
    mass_flow_rate = flow_rate / (24 * 3600)  # convert to kg/s
    specific_gas_constant = 8.314  # J/(mol*K)
    part_1 = (
        mass_flow_rate
        * (COMPRESSIBILITY_FACTOR * INLET_TEMPERATURE * specific_gas_constant)
        / (MOLECULAR_MASS_H2 * COMPRESSOR_EFFICIENCY)
        * (NUM_COMPRESSOR_STAGES * RATIO_SPECIFIC_HEATS / (RATIO_SPECIFIC_HEATS - 1))
    )
    part_2 = (
        (outlet_pressure / inlet_pressure)
        ** ((RATIO_SPECIFIC_HEATS - 1) / (NUM_COMPRESSOR_STAGES * RATIO_SPECIFIC_HEATS))
    ) - 1
    shaft_power = part_1 * part_2

    # Convert to kWh
    electricity_consumption = shaft_power * 24 / 1000

    return electricity_consumption
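
# Rough illustrative check of the formula above (not part of the module's logic;
# the module always calls this with flow_rate=1000 kg/day):
#   get_compression_effort(25, 500, 1000)   # ~1.7 kWh per kg H2 (25 -> 500 bar)
#   get_compression_effort(500, 900, 1000)  # ~0.3 kWh per kg H2 (500 -> 900 bar)
# i.e. roughly 2 kWh of compression electricity per kg of hydrogen dispensed at 700 bar.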


@lru_cache()
def get_pre_cooling_energy(
    ambient_temperature: float, capacity_utilization: float
) -> float:
    """
    Calculate the required electricity consumption to pre-cool the hydrogen
    before tank filling.

    :param ambient_temperature: the ambient temperature in degrees Celsius
    :param capacity_utilization: the capacity utilization of the pre-cooling system
    :return: the required electricity consumption in kWh

    """
    # Constants
    COEFFICIENT_1 = 0.3 / 1.6
    COEFFICIENT_2 = -0.018
    COEFFICIENT_3 = 25
    COEFFICIENT_4 = -21

    # Convert temperature to Kelvin
    temperature_K = ambient_temperature + 273.15

    # Calculate pre-cooling energy
    energy_pre_cooling = (
        COEFFICIENT_1 * np.exp(COEFFICIENT_2 * ambient_temperature)
        + (COEFFICIENT_3 * np.log(temperature_K) + COEFFICIENT_4) / capacity_utilization
    )

    return energy_pre_cooling
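
# Rough numerical check (illustrative only; these inputs mirror the ones used in
# add_pre_cooling_electricity() further below):
#   get_pre_cooling_energy(25, 10)   # ~12.3 kWh at 10 kg H2/day utilization (2020)
#   get_pre_cooling_energy(25, 150)  # ~0.9 kWh at 150 kg H2/day utilization (2050)
# so the pre-cooling burden per kg drops sharply as station utilization improves.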


@lru_cache()
def adjust_electrolysis_electricity_requirement(year: int) -> ndarray:
    """

    Calculate the adjusted electricity requirement for hydrogen electrolysis
    based on the given year.

    The electricity requirement decreases linearly from 58 kWh/kg H2 in 2010
    to 48 kWh/kg H2 in 2050, according to a literature review conducted by
    the Paul Scherrer Institute:

    Bauer (ed.), C., Desai, H., Heck, T., Sacchi, R., Schneider, S., Terlouw,
    T., Treyer, K., Zhang, X. Electricity storage and hydrogen – technologies,
    costs and impacts on climate change.
    Auftraggeberin: Bundesamt für Energie BFE, 3003 Bern.


    :param year: the year for which to calculate the adjusted electricity requirement
    :return: the adjusted electricity requirement in kWh/kg H2

    """
    # Constants
    MIN_ELECTRICITY_REQUIREMENT = 48
    MAX_ELECTRICITY_REQUIREMENT = 60  # upper bound, only binding for years before ~2006

    # Calculate adjusted electricity requirement
    electricity_requirement = -0.3538 * (year - 2010) + 58.589

    # Clip to minimum and maximum values
    adjusted_requirement = np.clip(
        electricity_requirement,
        MIN_ELECTRICITY_REQUIREMENT,
        MAX_ELECTRICITY_REQUIREMENT,
    )

    return adjusted_requirement
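
# Worked example (illustrative): the linear fit above gives
#   adjust_electrolysis_electricity_requirement(2030)  # 58.589 - 0.3538 * 20 = ~51.5 kWh/kg H2
#   adjust_electrolysis_electricity_requirement(2050)  # raw value ~44.4, clipped to the 48 kWh/kg floor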


def is_fuel_production(name):
    return any(i in name for i in ["Ethanol production", "Biodiesel production"])


def update_co2_emissions(
    dataset: dict, amount_non_fossil_co2: float, biosphere_flows: dict
) -> dict:
    """Update fossil and non-fossil CO2 emissions of the dataset."""
    # Test for the presence of a fossil CO2 flow
    if not any(
        exc for exc in dataset["exchanges"] if exc["name"] == "Carbon dioxide, fossil"
    ):
        pass
        # print(f"{dataset['name']} has no fossil CO2 output.")

    if "log parameters" not in dataset:
        dataset["log parameters"] = {}

    # subtract the biogenic CO2 amount from the initial fossil CO2 emission amount
    for exc in ws.biosphere(dataset, ws.equals("name", "Carbon dioxide, fossil")):
        dataset["log parameters"].update(
            {"initial amount of fossil CO2": exc["amount"]}
        )
        exc["amount"] -= amount_non_fossil_co2
        if exc["amount"] < 0:
            exc["amount"] = 0
        dataset["log parameters"].update({"new amount of fossil CO2": exc["amount"]})

    # add the non-fossil CO2 emission flow
    non_fossil_co2 = {
        "uncertainty type": 0,
        "amount": amount_non_fossil_co2,
        "type": "biosphere",
        "name": "Carbon dioxide, non-fossil",
        "unit": "kilogram",
        "categories": ("air",),
        "input": (
            "biosphere3",
            biosphere_flows[
                ("Carbon dioxide, non-fossil", "air", "unspecified", "kilogram")
            ],
        ),
    }

    dataset["log parameters"].update(
        {"new amount of biogenic CO2": amount_non_fossil_co2}
    )

    dataset["exchanges"].append(non_fossil_co2)

    return dataset
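
# Illustrative behaviour (hypothetical numbers): for a dataset emitting 1.0 kg of
# "Carbon dioxide, fossil", calling
#   update_co2_emissions(dataset, amount_non_fossil_co2=0.3, biosphere_flows=flows)
# reduces the fossil flow to 0.7 kg and appends a new 0.3 kg
# "Carbon dioxide, non-fossil" exchange, logging both amounts in "log parameters".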


@lru_cache()
def add_boil_off_losses(vehicle, distance, loss_val):
    if vehicle == "truck":
        # average truck speed
        speed = 50
    else:
        # average ship speed
        speed = 36
    days = distance / speed / 24
    # boil-off losses, function of days in transit
    return np.power(1 + loss_val, days)


@lru_cache()
def add_pipeline_losses(distance, loss_val):
    # pipeline losses, function of distance
    return 1 + (loss_val * distance)


def add_other_losses(loss_val):
    return 1 + loss_val
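
# Illustrative magnitudes for the three loss helpers (the loss coefficients here are
# hypothetical; the real ones come from hydrogen_supply_losses.yml):
#   add_boil_off_losses("ship", 2000, 0.002)  # ~1.005, i.e. ~0.5% boil-off over ~2.3 days at sea
#   add_pipeline_losses(500, 2e-5)            # 1.01, i.e. 1% leakage over 500 km
#   add_other_losses(0.01)                    # 1.01, a flat 1% loss
# The multipliers are applied to the hydrogen input, so a value of 1.01 means 1.01 kg
# of hydrogen must be supplied per kg delivered.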


@lru_cache()
def calculate_fuel_properties(amount, lhv, co2_factor, biogenic_share):
    """
    Calculate the fossil and non-fossil CO2 emissions and LHV for the given fuel
    properties and amount.
    """
    fossil_co2 = amount * lhv * co2_factor * (1 - biogenic_share)
    non_fossil_co2 = amount * lhv * co2_factor * biogenic_share
    weighted_lhv = amount * lhv
    return fossil_co2, non_fossil_co2, weighted_lhv
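
# Worked example (hypothetical fuel properties): 1 kg of a diesel-like fuel with an
# LHV of 43 MJ/kg, a CO2 factor of 0.0732 kg CO2/MJ and a 5% biogenic share gives
#   calculate_fuel_properties(1, 43, 0.0732, 0.05)  # -> (~2.99, ~0.16, 43)
# i.e. ~2.99 kg fossil CO2, ~0.16 kg non-fossil CO2 and 43 MJ of weighted LHV.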


def update_dataset(dataset, supplier_key, amount):
    """
    Add a new exchange to the dataset for the given fuel and supplier, and update
    the LHV and CO2 fields.
    """
    exchange = {
        "uncertainty type": 0,
        "amount": amount,
        "product": supplier_key[2],
        "name": supplier_key[0],
        "unit": supplier_key[-1],
        "location": supplier_key[1],
        "type": "technosphere",
    }
    dataset["exchanges"].append(exchange)

    return dataset


def _update_fuels(scenario, version, system_model, modified_datasets):
    fuels = Fuels(
        database=scenario["database"],
        iam_data=scenario["iam data"],
        model=scenario["model"],
        pathway=scenario["pathway"],
        year=scenario["year"],
        version=version,
        system_model=system_model,
        modified_datasets=modified_datasets,
    )

    if any(
        x is not None
        for x in (
            scenario["iam data"].petrol_markets,
            scenario["iam data"].diesel_markets,
            scenario["iam data"].gas_markets,
            scenario["iam data"].hydrogen_markets,
        )
    ):
        fuels.generate_fuel_markets()
        scenario["database"] = fuels.database
        modified_datasets = fuels.modified_datasets
    else:
        print("No fuel markets found in IAM data. Skipping.")

    return scenario, modified_datasets


class Fuels(BaseTransformation):
    """
    Class that modifies fuel inventories and markets in ecoinvent based on IAM output data.
    """

    def __init__(
        self,
        database: List[dict],
        iam_data: IAMDataCollection,
        model: str,
        pathway: str,
        year: int,
        version: str,
        system_model: str,
        modified_datasets: dict,
    ):
        super().__init__(
            database,
            iam_data,
            model,
            pathway,
            year,
            version,
            system_model,
            modified_datasets,
        )
        # ecoinvent version
        self.version = version
        # dictionary of crops with specifications
        self.crops_props = get_crops_properties()
        # list to store markets that will be created
        self.new_fuel_markets = {}
        # dictionary to store mapping results, to avoid redundant effort
        self.cached_suppliers = {}
        self.biosphere_flows = get_biosphere_code(self.version)
        self.fuel_groups = fetch_mapping(FUEL_GROUPS)
        self.rev_fuel_groups = {
            sub_type: main_type
            for main_type, sub_types in self.fuel_groups.items()
            for sub_type in sub_types
        }

        self.iam_fuel_markets = self.iam_data.production_volumes.sel(
            variables=[g for g in [item for sublist in list(self.fuel_groups.values()) for item in sublist]
                       if g in self.iam_data.production_volumes.variables.values.tolist()
                       ]
        )

        self.fuel_efficiencies = xr.DataArray(
            dims=["variables"], coords={"variables": []}
        )
        for efficiency in [
            self.iam_data.petrol_efficiencies,
            self.iam_data.diesel_efficiencies,
            self.iam_data.gas_efficiencies,
            self.iam_data.hydrogen_efficiencies,
        ]:
            if efficiency is not None:
                self.fuel_efficiencies = xr.concat(
                    [self.fuel_efficiencies, efficiency],
                    dim="variables",
                )

    def find_transport_activity(
        self, items_to_look_for: List[str], items_to_exclude: List[str], loc: str
    ) -> Tuple[str, str, str, str]:
        """Find the transport activity that is most similar to the given activity.
        This is done by looking for the most similar activity in the database.
        """

        try:
            dataset = ws.get_one(
                self.database,
                *[ws.contains("name", i) for i in items_to_look_for],
                ws.doesnt_contain_any("name", items_to_exclude),
                ws.equals("location", loc),
            )
        except ws.NoResults:
            dataset = ws.get_one(
                self.database,
                *[ws.contains("name", i) for i in items_to_look_for],
                ws.doesnt_contain_any("name", items_to_exclude),
            )

        return (
            dataset["name"],
            dataset["reference product"],
            dataset["unit"],
            dataset["location"],
        )

    def find_suppliers(
        self, name: str, ref_prod: str, unit: str, loc: str, exclude: List[str] = []
    ) -> Dict[Tuple[Any, Any, Any, Any], float]:
        """
        Return a list of potential suppliers given a name, reference product,
        unit and location, with their respective supply share (based on production volumes).

        :param name: the name of the activity
        :param ref_prod: the reference product of the activity
        :param unit: the unit of the activity
        :param loc: the location of the activity
        :param exclude: a list of activities to exclude from the search
        :return: a dictionary of potential suppliers with their respective supply share
        """

        # if we find a result in the cache dictionary, return it
        key = (name, ref_prod, loc)
        if key in self.cached_suppliers:
            return self.cached_suppliers[key]

        ecoinvent_regions = self.geo.iam_to_ecoinvent_location(loc)
        # search first for suppliers in `loc`, then in the ecoinvent
        # locations that are comprised in `loc`, and finally in `RER`, `RoW`, `GLO` and `CH`
        possible_locations = [
            [loc],
            ecoinvent_regions,
            ["RER"],
            ["RoW"],
            ["GLO"],
            ["CH"],
        ]
        suppliers, counter = [], 0

        # while we do not find a result
        while len(suppliers) == 0:
            suppliers = list(
                get_suppliers_of_a_region(
                    database=self.database,
                    locations=possible_locations[counter],
                    names=[name] if isinstance(name, str) else name,
                    reference_prod=ref_prod,
                    unit=unit,
                    exclude=exclude,
                )
            )
            counter += 1

        suppliers = [s for s in suppliers if s]  # filter out empty lists

        # find production volume-based share
        suppliers = get_shares_from_production_volume(suppliers)

        # store the result in cache for next time
        self.cached_suppliers[key] = suppliers

        return suppliers
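
    # Illustrative call (activity names and shares below are hypothetical):
    #   self.find_suppliers("market for diesel", "diesel", "kilogram", "EUR")
    # returns a dictionary keyed by (name, location, reference product, unit) tuples,
    # as the keys are indexed elsewhere in this module, with production-volume-based
    # shares summing to 1, e.g.
    #   {("market for diesel", "Europe without Switzerland", "diesel", "kilogram"): 0.9,
    #    ("market for diesel", "CH", "diesel", "kilogram"): 0.1}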

    def generate_hydrogen_activities(self) -> None:
        """
        Defines regional variants for hydrogen production, but also different supply
        chain designs:
        * by truck (500 km), gaseous, liquid and LOHC
        * by reassigned CNG pipeline (500 km), gaseous, with and without inhibitors
        * by dedicated H2 pipeline (500 km), gaseous
        * by ship, liquid (2000 km)

        For truck and pipeline supply chains, we assume a transmission and a distribution part, for which
        we have specific pipeline designs. We also assume a means for regional storage in between (salt cavern).
        We apply distance-based losses along the way.

        Most of these supply chain design options are based on the work of:
        * Wulf C, Reuß M, Grube T, Zapp P, Robinius M, Hake JF, et al.
          Life Cycle Assessment of hydrogen transport and distribution options.
          J Clean Prod 2018;199:431–43. https://doi.org/10.1016/j.jclepro.2018.07.180.
        * Hank C, Sternberg A, Köppel N, Holst M, Smolinka T, Schaadt A, et al.
          Energy efficiency and economic assessment of imported energy carriers based on renewable electricity.
          Sustain Energy Fuels 2020;4:2256–73. https://doi.org/10.1039/d0se00067a.
        * Petitpas G. Boil-off losses along the LH2 pathway. US Dep Energy Off Sci Tech Inf 2018.


        """

        hydrogen_sources = fetch_mapping(HYDROGEN_SOURCES)

        for hydrogen_type, hydrogen_vars in hydrogen_sources.items():
            hydrogen_activity_name = hydrogen_sources[hydrogen_type].get("name")
            hydrogen_efficiency_variable = hydrogen_sources[hydrogen_type].get("var")
            hydrogen_feedstock_name = hydrogen_sources[hydrogen_type].get(
                "feedstock name"
            )
            hydrogen_feedstock_unit = hydrogen_sources[hydrogen_type].get(
                "feedstock unit"
            )
            efficiency_floor_value = hydrogen_sources[hydrogen_type].get("floor value")

            new_ds = self.fetch_proxies(
                name=hydrogen_activity_name,
                ref_prod="hydrogen",
                production_variable=hydrogen_efficiency_variable,
            )

            for region, dataset in new_ds.items():
                # find current energy consumption in dataset
                initial_energy_consumption = sum(
                    exc["amount"]
                    for exc in dataset["exchanges"]
                    if exc["unit"] == hydrogen_feedstock_unit
                    and hydrogen_feedstock_name in exc["name"]
                    and exc["type"] == "technosphere"
                )

                # add it to "log parameters"
                if "log parameters" not in dataset:
                    dataset["log parameters"] = {}

                dataset["log parameters"].update(
                    {
                        "initial energy input for hydrogen production": initial_energy_consumption
                    }
                )

                # Fetch the efficiency change of the
                # electrolysis process over time,
                # according to the IAM scenario,
                # if available.

                if (
                    hydrogen_efficiency_variable
                    in self.fuel_efficiencies.variables.values
                ):
                    # Find scaling factor compared to 2020
                    scaling_factor = 1 / self.find_iam_efficiency_change(
                        data=self.fuel_efficiencies,
                        variable=hydrogen_efficiency_variable,
                        location=region,
                    )

                    # new energy consumption
                    new_energy_consumption = scaling_factor * initial_energy_consumption

                    # set a floor value/kg H2
                    if new_energy_consumption < efficiency_floor_value:
                        new_energy_consumption = efficiency_floor_value
                else:
                    if hydrogen_type == "from electrolysis":
                        # get the electricity consumption
                        new_energy_consumption = (
                            adjust_electrolysis_electricity_requirement(self.year)
                        )
                    else:
                        new_energy_consumption = None

                if new_energy_consumption:
                    # remove energy inputs
                    dataset["exchanges"] = [
                        exc
                        for exc in dataset["exchanges"]
                        if not (
                            exc["unit"] == hydrogen_feedstock_unit
                            and hydrogen_feedstock_name in exc["name"]
                            and exc["type"] == "technosphere"
                        )
                    ]

                    energy_suppliers = self.find_suppliers(
                        name=hydrogen_feedstock_name,
                        ref_prod=hydrogen_feedstock_name,
                        unit=hydrogen_feedstock_unit,
                        loc=region,
                        exclude=["period", "production", "high voltage"],
                    )

                    dataset["exchanges"].extend(
                        {
                            "uncertainty type": 0,
                            "amount": new_energy_consumption * share,
                            "type": "technosphere",
                            "product": supplier[2],
                            "name": supplier[0],
                            "unit": supplier[-1],
                            "location": supplier[1],
                        }
                        for supplier, share in energy_suppliers.items()
                    )

                    # add it to "log parameters"
                    if "log parameters" not in dataset:
                        dataset["log parameters"] = {}

                    # add it to "log parameters"
                    dataset["log parameters"].update(
                        {
                            "new energy input for hydrogen production": new_energy_consumption
                        }
                    )

                    self.write_log(dataset)

                    # add it to list of created datasets
                    self.modified_datasets[(self.model, self.scenario, self.year)][
                        "created"
                    ].append(
                        (
                            dataset["name"],
                            dataset["reference product"],
                            dataset["location"],
                            dataset["unit"],
                        )
                    )

                    string = f" The electricity input per kg of H2 has been adapted to the year {self.year}."
                    if "comment" in dataset:
                        dataset["comment"] += string
                    else:
                        dataset["comment"] = string

                    dataset["comment"] = (
                        "Region-specific hydrogen production dataset "
                        "generated by `premise`. "
                    )

            self.database.extend(new_ds.values())

        # print("Generate region-specific hydrogen supply chains.")

        # loss coefficients for hydrogen supply
        losses = fetch_mapping(HYDROGEN_SUPPLY_LOSSES)

        supply_chain_scenarios = fetch_mapping(SUPPLY_CHAIN_SCENARIOS)

        for act in [
            "hydrogen embrittlement inhibition",
            "geological hydrogen storage",
            # "hydrogenation of hydrogen",
            # "dehydrogenation of hydrogen",
            "hydrogen refuelling station",
        ]:
            new_ds = self.fetch_proxies(name=act, ref_prod=" ")

            for k, dataset in new_ds.items():
                for exc in ws.production(dataset):
                    if "input" in exc:
                        del exc["input"]

                new_ds[k] = self.relink_technosphere_exchanges(
                    dataset,
                )

            self.database.extend(new_ds.values())

            # add to log
            for dataset in list(new_ds.values()):
                self.write_log(dataset)
                # add it to list of created datasets
                self.modified_datasets[(self.model, self.scenario, self.year)][
                    "created"
                ].append(
                    (
                        dataset["name"],
                        dataset["reference product"],
                        dataset["location"],
                        dataset["unit"],
                    )
                )

        for region in self.regions:
            for hydrogen_type, hydrogen_vars in hydrogen_sources.items():
                for vehicle, config in supply_chain_scenarios.items():
                    for state in config["state"]:
                        for distance in config["distance"]:
                            # dataset creation
                            dataset: dict[
                                str,
                                Union[
                                    Union[
                                        str, list[dict[str, Union[int, str]]], ndarray
                                    ],
                                    Any,
                                ],
                            ] = {
                                "location": region,
                                "name": f"hydrogen supply, {hydrogen_type}, by {vehicle}, as {state}, over {distance} km",
                                "reference product": "hydrogen, 700 bar",
                                "unit": "kilogram",
                                "database": self.database[1]["database"],
                                "code": str(uuid.uuid4().hex),
                                "comment": "Dataset representing hydrogen supply, generated by `premise`.",
                                "exchanges": [
                                    {
                                        "uncertainty type": 0,
                                        "loc": 1,
                                        "amount": 1,
                                        "type": "production",
                                        "production volume": 1,
                                        "product": "hydrogen, 700 bar",
                                        "name": f"hydrogen supply, {hydrogen_type}, "
                                        f"by {vehicle}, as {state}, over {distance} km",
                                        "unit": "kilogram",
                                        "location": region,
                                    }
                                ],
                            }

                            # transport
                            dataset = self.add_hydrogen_transport(
                                dataset, config, region, distance, vehicle
                            )

                            # need for inhibitor and purification if CNG pipeline
                            # electricity for purification: 2.46 kWh/kg H2
                            if vehicle == "CNG pipeline":
                                dataset = self.add_hydrogen_inhibitor(dataset, region)

                            if "regional storage" in config:
                                dataset = self.add_hydrogen_regional_storage(
                                    dataset, region, config
                                )

                            # electricity for compression
                            if state in ["gaseous", "liquid"]:
                                dataset = self.add_compression_electricity(
                                    state, vehicle, distance, region, dataset
                                )

                            # electricity for hydrogenation, dehydrogenation and
                            # compression at delivery
                            if state == "liquid organic compound":
                                dataset = self.add_hydrogenation_energy(region, dataset)

                            dataset = self.add_hydrogen_input_and_losses(
                                hydrogen_vars,
                                region,
                                losses,
                                vehicle,
                                state,
                                distance,
                                dataset,
                            )

                            # add fuelling station, including storage tank
                            dataset["exchanges"].append(
                                self.add_h2_fuelling_station(region)
                            )

                            # add pre-cooling
                            dataset = self.add_pre_cooling_electricity(dataset, region)

                            dataset = self.relink_technosphere_exchanges(
                                dataset,
                            )

                            self.database.append(dataset)

                            # add to log
                            self.write_log(dataset)

                            # add it to list of created datasets
                            self.modified_datasets[
                                (self.model, self.scenario, self.year)
                            ]["created"].append(
                                (
                                    dataset["name"],
                                    dataset["reference product"],
                                    dataset["location"],
                                    dataset["unit"],
                                )
                            )

    def add_hydrogen_transport(
        self,
        dataset: Dict[str, Any],
        config: Dict[str, Any],
        region: str,
        distance: float,
        vehicle: str,
    ) -> Dict[str, Any]:
        """
        Adds hydrogen transport exchanges to the given dataset.

        :param dataset: The dataset to modify.
        :param config: The configuration for the vehicle transport.
        :param region: The region of the dataset.
        :param distance: The distance traveled.
        :param vehicle: The type of vehicle used.
        :return: The modified dataset.
        """
        for transport in config["vehicle"]:
            transport_name = transport["name"]
            transport_ref_prod = transport["reference product"]
            transport_unit = transport["unit"]
            suppliers = self.find_suppliers(
                name=transport_name,
                ref_prod=transport_ref_prod,
                unit=transport_unit,
                loc=region,
            )

            for supplier, share in suppliers.items():
                if supplier[-1] == "ton kilometer":
                    amount = distance * share / 1000
                else:
                    amount = distance * share / 2 * (1 / eval(config["lifetime"]))

                exchange = {
                    "uncertainty type": 0,
                    "amount": amount,
                    "type": "technosphere",
                    "product": supplier[2],
                    "name": supplier[0],
                    "unit": supplier[-1],
                    "location": supplier[1],
                    "comment": f"Transport over {distance} km by {vehicle}. ",
                }

                dataset["exchanges"].append(exchange)

            comment = f"Transport over {distance} km by {vehicle}. "

            if "comment" in dataset:
                dataset["comment"] += comment
            else:
                dataset["comment"] = comment

        return dataset

    def add_hydrogen_input_and_losses(
        self, hydrogen_activity, region, losses, vehicle, state, distance, dataset
    ):
        # fetch the H2 production activity
        h2_ds = list(
            self.find_suppliers(
                name=hydrogen_activity["name"],
                ref_prod="hydrogen",
                unit="kilogram",
                loc=region,
            ).keys()
        )[0]

        # include losses along the way
        string = ""
        total_loss = 1
        for loss, val in losses[vehicle][state].items():
            val = float(val)

            if loss == "boil-off":
                total_loss *= add_boil_off_losses(vehicle, distance, val)
                string += f"Boil-off losses: {int((total_loss - 1) * 100)}%. "

            elif loss == "pipeline_leak":
                total_loss *= add_pipeline_losses(distance, val)
                string += f"Pipeline losses: {int((total_loss - 1) * 100)}%. "
            else:
                total_loss *= add_other_losses(val)
                string += f"{loss} losses: {int(val * 100)}%. "

        dataset["exchanges"].append(
            {
                "uncertainty type": 0,
                "amount": 1 * total_loss,
                "type": "technosphere",
                "product": h2_ds[2],
                "name": h2_ds[0],
                "unit": h2_ds[3],
                "location": region,
            }
        )

        # adds losses as hydrogen emission to air
        dataset["exchanges"].append(
            {
                "uncertainty type": 0,
                "amount": total_loss - 1,
                "type": "biosphere",
                "name": "Hydrogen",
                "unit": "kilogram",
                "categories": ("air",),
                "input": (
                    "biosphere3",
                    self.biosphere_flows[
                        (
                            "Hydrogen",
                            "air",
                            "unspecified",
                            "kilogram",
                        )
                    ],
                ),
            }
        )

        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        if "log parameters" not in dataset:
            dataset["log parameters"] = {}
        dataset["log parameters"].update(
            {"hydrogen distribution losses": total_loss - 1}
        )

        return dataset
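
    # Illustrative behaviour (the loss coefficients below are hypothetical and would
    # normally come from hydrogen_supply_losses.yml): for a liquid-hydrogen truck route
    # of 500 km with losses {"boil-off": 0.002, "transfer": 0.01}, total_loss is roughly
    # 1.0008 * 1.01 = ~1.011, so the dataset requests ~1.011 kg of hydrogen per kg
    # delivered and emits the ~0.011 kg difference as "Hydrogen" to air.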

    def add_hydrogenation_energy(
        self, region: str, dataset: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Adds hydrogenation and dehydrogenation activities, as well as compression at delivery,
        to a dataset for a given region.

        :param region: The region for which to add the activities.
        :param dataset: The dataset to modify.
        :return: The modified dataset.

        :raises ValueError: If no hydrogenation activity is found for the specified region.

        """

        try:
            # fetch the H2 hydrogenation activity
            hydrogenation_ds = list(
                self.find_suppliers(
                    name="hydrogenation of hydrogen",
                    ref_prod="hydrogenation",
                    unit="kilogram",
                    loc=region,
                ).keys()
            )[0]

            # fetch the H2 de-hydrogenation activity
            dehydrogenation_ds = list(
                self.find_suppliers(
                    name="dehydrogenation of hydrogen",
                    ref_prod="dehydrogenation",
                    unit="kilogram",
                    loc=region,
                ).keys()
            )[0]

        except ws.NoResults:
            raise ValueError(f"No hydrogenation activity found for region {region}")
        except ws.MultipleResults:
            raise ValueError(
                f"Multiple hydrogenation activities found for region {region}"
            )

        dataset["exchanges"].extend(
            [
                {
                    "uncertainty type": 0,
                    "amount": 1,
                    "type": "technosphere",
                    "product": hydrogenation_ds[2],
                    "name": hydrogenation_ds[0],
                    "unit": hydrogenation_ds[3],
                    "location": region,
                },
                {
                    "uncertainty type": 0,
                    "amount": 1,
                    "type": "technosphere",
                    "product": dehydrogenation_ds[2],
                    "name": dehydrogenation_ds[0],
                    "unit": dehydrogenation_ds[3],
                    "location": region,
                },
            ]
        )

        # After dehydrogenation at ambient temperature at delivery
        # the hydrogen needs to be compressed up to 900 bar to be dispensed
        # in 700 bar storage tanks

        electricity_comp = get_compression_effort(25, 900, 1000)

        electricity_suppliers = self.find_suppliers(
            name="market group for electricity, low voltage",
            ref_prod="electricity, low voltage",
            unit="kilowatt hour",
            loc=region,
            exclude=["period"],
        )

        dataset["exchanges"].extend(
            {
                "uncertainty type": 0,
                "amount": electricity_comp * share,
                "type": "technosphere",
                "product": supplier[2],
                "name": supplier[0],
                "unit": supplier[-1],
                "location": supplier[1],
            }
            for supplier, share in electricity_suppliers.items()
        )

        string = (
            " Hydrogenation and dehydrogenation of hydrogen included. "
            "Compression at delivery after dehydrogenation also included."
        )
        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        if "log parameters" not in dataset:
            dataset["log parameters"] = {}
        dataset["log parameters"].update(
            {
                "electricity for hydrogen compression after dehydrogenation": electricity_comp
            }
        )

        return dataset

    def add_hydrogen_regional_storage(
        self, dataset: dict, region: str, config: dict
    ) -> dict:
        """

        Add a geological storage activity to the dataset for a given region.

        :param dataset: The dataset to modify.
        :param region: The region for which to add the activity.
        :param config: The configuration file for the analysis.
        :return: The modified dataset.

        """

        storage_ds = list(
            self.find_suppliers(
                name=config["regional storage"]["name"],
                ref_prod=config["regional storage"]["reference product"],
                unit=config["regional storage"]["unit"],
                loc=region,
            ).keys()
        )[0]

        dataset["exchanges"].append(
            {
                "uncertainty type": 0,
                "amount": 1,
                "type": "technosphere",
                "product": storage_ds[2],
                "name": storage_ds[0],
                "unit": storage_ds[3],
                "location": region,
                "comment": "Geological storage (salt cavern).",
            }
        )

        string = (
            " Geological storage is added. It includes 0.344 kWh for "
            "the injection and pumping of 1 kg of H2."
        )
        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        return dataset

    def add_hydrogen_inhibitor(self, dataset: dict, region: str) -> dict:
        """
        Adds hydrogen embrittlement inhibitor to the dataset for a given region.

        :param dataset: The dataset to modify.
        :param region: The region for which to add the activity.
        :return: The modified dataset.
        """

        inhibitor_ds = list(
            self.find_suppliers(
                name="hydrogen embrittlement inhibition",
                ref_prod="hydrogen",
                unit="kilogram",
                loc=region,
            ).keys()
        )[0]

        dataset["exchanges"].append(
            {
                "uncertainty type": 0,
                "amount": 1,
                "type": "technosphere",
                "product": inhibitor_ds[2],
                "name": inhibitor_ds[0],
                "unit": inhibitor_ds[3],
                "location": region,
                "comment": "Injection of an inhibiting gas (oxygen) "
                "to prevent embrittlement of metal. ",
            }
        )

        string = (
            "2.46 kWh/kg H2 is needed to purify the hydrogen from the inhibiting gas. "
            "The recovery rate for hydrogen after separation from the inhibitor gas is 93%. "
        )
        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        return dataset

    def add_compression_electricity(
        self, state: str, vehicle: str, distance: float, region: str, dataset: dict
    ) -> dict:
        """
        Add the electricity needed for the compression of hydrogen.

        :param state: The state of the hydrogen (gaseous or liquid).
        :param vehicle: The vehicle used for transport (truck or pipeline).
        :param distance: The distance travelled by the vehicle.
        :param region: The region for which to add the activity.
        :param dataset: The dataset to modify.
        :return: The modified dataset.

        """

        # if gaseous
        # if transport by truck, compression from 25 bar to 500 bar for the transport
        # and from 500 bar to 900 bar for dispensing in 700 bar storage tanks

        # if transport by pipeline, initial compression from 25 bar to 100 bar
        # and 0.6 kWh re-compression every 250 km
        # and finally from 100 bar to 900 bar for dispensing in 700 bar storage tanks

        # if liquid
        # liquefaction electricity need
        # currently, 12 kWh/kg H2
        # midterm, 8 kWh/kg H2
        # by 2050, 6 kWh/kg H2

        if state == "gaseous":
            if vehicle == "truck":
                electricity_comp = get_compression_effort(25, 500, 1000)
                electricity_comp += get_compression_effort(500, 900, 1000)
            else:
                electricity_comp = get_compression_effort(25, 100, 1000) + (
                    0.6 * distance / 250
                )
                electricity_comp += get_compression_effort(100, 900, 1000)

            string = (
                f" {electricity_comp} kWh is added to compress from 25 bar to 100 bar (if pipeline) "
                f"or 500 bar (if truck), and then to 900 bar to dispense in storage tanks at 700 bar. "
                " Additionally, if transported by pipeline, there is re-compression (0.6 kWh) every 250 km."
            )

        else:
            electricity_comp = np.clip(
                np.interp(
                    self.year,
                    [2020, 2035, 2050],
                    [12, 8, 6],
                ),
                6,
                12,
            )

            string = f" {electricity_comp} kWh is added to liquefy the hydrogen. "

        suppliers = self.find_suppliers(
            name="market group for electricity, low voltage",
            ref_prod="electricity, low voltage",
            unit="kilowatt hour",
            loc=region,
            exclude=["period"],
        )

        new_exc = []
        for supplier, share in suppliers.items():
            new_exc.append(
                {
                    "uncertainty type": 0,
                    "amount": electricity_comp * share,
                    "type": "technosphere",
                    "product": supplier[2],
                    "name": supplier[0],
                    "unit": supplier[-1],
                    "location": supplier[1],
                }
            )

        dataset["exchanges"].extend(new_exc)

        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        if "log parameters" not in dataset:
            dataset["log parameters"] = {}
        dataset["log parameters"].update(
            {"electricity for hydrogen compression": electricity_comp}
        )

        return dataset
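
    # Worked example of the liquefaction branch (illustrative): for self.year = 2030,
    # np.interp(2030, [2020, 2035, 2050], [12, 8, 6]) interpolates between 12 and 8 kWh/kg,
    # giving ~9.3 kWh of liquefaction electricity per kg of H2 (kept within the 6-12 kWh/kg
    # bounds by the clip).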

    @lru_cache(maxsize=None)
    def add_h2_fuelling_station(self, region: str) -> dict:
        """
        Add the hydrogen fuelling station.

        :param region: The region for which to add the activity.
        :return: The exchange for the fuelling station infrastructure.

        """

        ds_h2_station = list(
            self.find_suppliers(
                name="hydrogen refuelling station",
                ref_prod="hydrogen",
                unit="unit",
                loc=region,
            ).keys()
        )[0]

        return {
            "uncertainty type": 0,
            "amount": 1 / (600 * 365 * 40),  # 1 over lifetime: 40 years, 600 kg H2/day
            "type": "technosphere",
            "product": ds_h2_station[2],
            "name": ds_h2_station[0],
            "unit": ds_h2_station[3],
            "location": region,
        }
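
    # Amortisation arithmetic for the exchange above: a station dispensing 600 kg H2/day
    # over a 40-year lifetime handles 600 * 365 * 40 = ~8.76 million kg of hydrogen,
    # hence ~1.14e-7 station units are allocated per kg of hydrogen dispensed.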

    def add_pre_cooling_electricity(self, dataset: dict, region: str) -> dict:
        """
        Add the electricity needed for pre-cooling the hydrogen.

        :param dataset: The dataset to modify.
        :param region: The region for which to add the activity.
        :return: The modified dataset.
        """

        # finally, add pre-cooling
        # is needed before filling vehicle tanks
        # as the hydrogen is pumped, the ambient temperature
        # vaporizes the gas, and because of the Joule-Thomson effect,
        # the gas temperature increases.
        # Hence, a refrigerant is needed to keep the H2 as low as
        # -30 C during pumping.

        # https://www.osti.gov/servlets/purl/1422579 gives us a formula
        # to estimate pre-cooling electricity need
        # it requires a capacity utilization for the fuelling station
        # as well as an ambient temperature
        # we will use a temp of 25 C
        # and a capacity utilization going from 10 kg H2/day in 2020
        # to 150 kg H2/day in 2050

        t_amb = 25
        cap_util = np.interp(self.year, [2020, 2050, 2100], [10, 150, 150])
        el_pre_cooling = get_pre_cooling_energy(t_amb, cap_util)

        suppliers = self.find_suppliers(
            name="market group for electricity, low voltage",
            ref_prod="electricity, low voltage",
            unit="kilowatt hour",
            loc=region,
            exclude=["period"],
        )

        for supplier, share in suppliers.items():
            dataset["exchanges"].append(
                {
                    "uncertainty type": 0,
                    "amount": el_pre_cooling * share,
                    "type": "technosphere",
                    "product": supplier[2],
                    "name": supplier[0],
                    "unit": supplier[-1],
                    "location": supplier[1],
                }
            )

        string = (
            f"Pre-cooling electricity is considered ({el_pre_cooling}), "
            f"assuming an ambient temperature of {t_amb}C "
            f"and a capacity utilization for the fuel station of {cap_util} kg/day."
        )
        if "comment" in dataset:
            dataset["comment"] += string
        else:
            dataset["comment"] = string

        if "log parameters" not in dataset:
            dataset["log parameters"] = {}
        dataset["log parameters"].update(
            {"electricity for hydrogen pre-cooling": el_pre_cooling}
        )

        return dataset

    def generate_biogas_activities(self):
        """
        Generate biogas activities.
        """

        fuel_activities = fetch_mapping(METHANE_SOURCES)

        for fuel, activities in fuel_activities.items():
            for activity in activities:
                if fuel == "methane, synthetic":
                    original_ds = self.fetch_proxies(
                        name=activity,
                        ref_prod=" ",
                        delete_original_dataset=False,
                        empty_original_activity=False,
                    )

                    for co2_type in [
                        (
                            "carbon dioxide, captured from atmosphere, with a sorbent-based direct air capture system, 100ktCO2, with waste heat, and grid electricity",
                            "carbon dioxide, captured from atmosphere",
                            "waste heat",
                        ),
                        (
                            "carbon dioxide, captured from atmosphere, with a solvent-based direct air capture system, 1MtCO2, with heat pump heat, and grid electricity",
                            "carbon dioxide, captured from atmosphere",
                            "heat pump heat",
                        ),
                    ]:
                        new_ds = copy.deepcopy(original_ds)

                        for region, dataset in new_ds.items():
                            dataset["code"] = str(uuid.uuid4().hex)
                            dataset["name"] += f", using {co2_type[2]}"
                            for prod in ws.production(dataset):
                                prod["name"] = dataset["name"]

                                if "input" in prod:
                                    del prod["input"]

                            for exc in ws.technosphere(dataset):
                                if (
                                    "carbon dioxide, captured from atmosphere"
                                    in exc["name"].lower()
                                ):
                                    # store amount
                                    co2_amount = exc["amount"]

                                    try:
                                        # add new exchanges
                                        dac_suppliers = self.find_suppliers(
                                            name=co2_type[0],
                                            ref_prod=co2_type[1],
                                            unit="kilogram",
                                            loc=region,
                                        )
                                    except IndexError:
                                        dac_suppliers = None

                                    if dac_suppliers:
                                        # remove exchange
                                        dataset["exchanges"].remove(exc)

                                        dataset["exchanges"].extend(
                                            {
                                                "uncertainty type": 0,
                                                "amount": co2_amount * share,
                                                "type": "technosphere",
                                                "product": supplier[2],
                                                "name": supplier[0],
                                                "unit": supplier[-1],
                                                "location": supplier[1],
                                            }
                                            for supplier, share in dac_suppliers.items()
                                        )

                            for exc in ws.technosphere(dataset):
                                if (
                                    "methane, from electrochemical methanation"
                                    in exc["name"]
                                ):
                                    exc["name"] += f", using {co2_type[2]}"
                                    exc["location"] = dataset["location"]

                                    dataset["name"] = dataset["name"].replace(
                                        "from electrochemical methanation",
                                        f"from electrochemical methanation "
                                        f"(H2 from electrolysis, CO2 from DAC "
                                        f"using {co2_type[2]})",
                                    )

                                    for prod in ws.production(dataset):
                                        prod["name"] = prod["name"].replace(
                                            "from electrochemical methanation",
                                            f"from electrochemical methanation "
                                            f"(H2 from electrolysis, CO2 from DAC "
                                            f"using {co2_type[2]})",
                                        )

                        self.database.extend(new_ds.values())

                        # add to log
                        for new_dataset in list(new_ds.values()):
                            self.write_log(new_dataset)

                            # add it to list of created datasets
                            self.modified_datasets[
                                (self.model, self.scenario, self.year)
                            ]["created"].append(
                                (
                                    new_dataset["name"],
                                    new_dataset["reference product"],
                                    new_dataset["location"],
                                    new_dataset["unit"],
                                )
                            )
                else:
                    original_ds = self.fetch_proxies(name=activity, ref_prod=" ")
                    new_ds = copy.deepcopy(original_ds)

                    for region, dataset in new_ds.items():
                        dataset["code"] = str(uuid.uuid4().hex)
                        for exc in ws.production(dataset):
                            if "input" in exc:
                                exc.pop("input")

                        new_ds[region] = self.relink_technosphere_exchanges(
                            dataset,
                        )

                    self.database.extend(new_ds.values())

                    # add to log
                    for new_dataset in list(new_ds.values()):
                        self.write_log(new_dataset)
                        # add it to list of created datasets
                        self.modified_datasets[(self.model, self.scenario, self.year)][
                            "created"
                        ].append(
1435
                            (
1436
                                new_dataset["name"],
1437
                                new_dataset["reference product"],
1438
                                new_dataset["location"],
1439
                                new_dataset["unit"],
1440
                            )
1441
                        )
1442

1443
    def generate_synthetic_fuel_activities(self):
1✔
1444
        """
1445
        Generate synthetic fuel activities.
1446
        """
1447

1448
        fuel_activities = fetch_mapping(LIQUID_FUEL_SOURCES)
×
1449

1450
        for activities in fuel_activities.values():
×
1451
            for activity in activities:
×
1452
                new_ds = self.fetch_proxies(name=activity, ref_prod=" ")
×
1453
                for region, dataset in new_ds.items():
×
1454
                    for exc in ws.production(dataset):
×
1455
                        if "input" in exc:
×
1456
                            del exc["input"]
×
1457

1458
                    for exc in ws.technosphere(dataset):
×
1459
                        if "carbon dioxide, captured from atmosphere" in exc["name"]:
×
1460
                            # store amount
1461
                            co2_amount = exc["amount"]
×
1462

1463
                            try:
×
1464
                                # add new exchanges
1465
                                dac_suppliers = self.find_suppliers(
×
1466
                                    name="carbon dioxide, captured from atmosphere, with a solvent-based direct air capture "
1467
                                    "system, 1MtCO2, with heat pump heat, and grid electricity",
1468
                                    ref_prod="carbon dioxide, captured from atmosphere",
1469
                                    unit="kilogram",
1470
                                    loc=region,
1471
                                )
1472
                            except IndexError:
×
1473
                                dac_suppliers = None
×
1474

1475
                            if dac_suppliers:
×
1476
                                # remove exchange
1477
                                dataset["exchanges"].remove(exc)
×
1478

1479
                                dataset["exchanges"].extend(
×
1480
                                    {
1481
                                        "uncertainty type": 0,
1482
                                        "amount": co2_amount * share,
1483
                                        "type": "technosphere",
1484
                                        "product": supplier[2],
1485
                                        "name": supplier[0],
1486
                                        "unit": supplier[-1],
1487
                                        "location": supplier[1],
1488
                                    }
1489
                                    for supplier, share in dac_suppliers.items()
1490
                                )
1491

1492
                    dataset = self.relink_technosphere_exchanges(
×
1493
                        dataset,
1494
                    )
1495

1496
                    self.database.append(dataset)
×
1497

1498
                    # add to log
1499
                    self.write_log(dataset)
×
1500
                    # add it to list of created datasets
1501
                    self.modified_datasets[(self.model, self.scenario, self.year)][
×
1502
                        "created"
1503
                    ].append(
1504
                        (
1505
                            dataset["name"],
1506
                            dataset["reference product"],
1507
                            dataset["location"],
1508
                            dataset["unit"],
1509
                        )
1510
                    )
1511

1512
    def adjust_land_use(self, dataset: dict, region: str, crop_type: str) -> dict:
1✔
1513
        """
1514
        Adjust land use.
1515

1516
        :param dataset: dataset to adjust
1517
        :param region: region of the dataset
1518
        :param crop_type: crop type of the dataset
1519
        :return: adjusted dataset
1520

1521
        """
1522
        string = ""
×
1523
        land_use = 0
×
1524

1525
        for exc in dataset["exchanges"]:
×
1526
            # we adjust the land use
1527
            if exc["type"] == "biosphere" and exc["name"].startswith("Occupation"):
×
1528
                if "LHV [MJ/kg as received]" in dataset:
×
1529
                    lower_heating_value = dataset["LHV [MJ/kg as received]"]
×
1530
                else:
1531
                    lower_heating_value = dataset.get("LHV [MJ/kg dry]", 0)
×
1532

1533
                # Ha/GJ
1534
                land_use = (
×
1535
                    self.iam_data.land_use.sel(region=region, variables=crop_type)
1536
                    .interp(year=self.year)
1537
                    .values
1538
                )
1539

1540
                # replace NA values with 0
1541
                if np.isnan(land_use):
×
1542
                    land_use = 0
×
1543

1544
                if land_use > 0:
×
1545
                    # HA to m2
1546
                    land_use *= 10000
×
1547
                    # m2/GJ to m2/MJ
1548
                    land_use /= 1000
×
1549
                    # m2/kg, as received
1550
                    land_use *= lower_heating_value
×
1551
                    # update exchange value
1552
                    exc["amount"] = float(land_use)
×
1553

1554
                    string = (
×
1555
                        f"The land area occupied has been modified to {land_use}, "
1556
                        f"to be in line with the scenario {self.scenario} of {self.model.upper()} "
1557
                        f"in {self.year} in the region {region}. "
1558
                    )
1559

1560
        if string and land_use:
×
1561
            if "comment" in dataset:
×
1562
                dataset["comment"] += string
×
1563
            else:
1564
                dataset["comment"] = string
×
1565

1566
            if "log parameters" not in dataset:
×
1567
                dataset["log parameters"] = {}
×
1568
            dataset["log parameters"].update(
×
1569
                {
1570
                    "land footprint": land_use,
1571
                }
1572
            )
1573

1574
        return dataset
×
1575

1576
    def adjust_land_use_change_emissions(
1✔
1577
        self,
1578
        dataset: dict,
1579
        region: str,
1580
        crop_type: str,
1581
    ) -> dict:
1582
        """
1583
        Adjust land use change emissions to crop farming dataset
1584
        if the variable is provided by the IAM.
1585

1586
        :param dataset: dataset to adjust
1587
        :param region: region of the dataset
1588
        :param crop_type: crop type of the dataset
1589
        :return: adjusted dataset
1590

1591
        """
1592

1593
        # then, we should include the Land Use Change-induced CO2 emissions
1594
        # those are given in kg CO2-eq./GJ of primary crop energy
1595

1596
        # kg CO2/GJ
1597
        land_use_co2 = (
×
1598
            self.iam_data.land_use_change.sel(region=region, variables=crop_type)
1599
            .interp(year=self.year)
1600
            .values
1601
        )
1602

1603
        # replace NA values with 0
1604
        if np.isnan(land_use_co2):
×
1605
            land_use_co2 = 0
×
1606

1607
        if land_use_co2 > 0:
×
1608
            # lower heating value, as received
1609
            if "LHV [MJ/kg as received]" in dataset:
×
1610
                lower_heating_value = dataset["LHV [MJ/kg as received]"]
×
1611
            else:
1612
                lower_heating_value = dataset.get("LHV [MJ/kg dry]", 0)
×
1613

1614
            # kg CO2/MJ
1615
            land_use_co2 /= 1000
×
1616
            land_use_co2 *= lower_heating_value
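            # Illustrative example (hypothetical numbers): 5 kg CO2/GJ of primary
            # crop energy becomes 0.005 kg CO2/MJ, and with an LHV of 18 MJ/kg
            # the flow added below amounts to 0.005 * 18 = 0.09 kg CO2 per kg of crop.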
×
1617

1618
            land_use_co2_exc = {
×
1619
                "uncertainty type": 0,
1620
                "loc": float(land_use_co2),
1621
                "amount": float(land_use_co2),
1622
                "type": "biosphere",
1623
                "name": "Carbon dioxide, from soil or biomass stock",
1624
                "unit": "kilogram",
1625
                "input": (
1626
                    "biosphere3",
1627
                    self.biosphere_flows[
1628
                        (
1629
                            "Carbon dioxide, from soil or biomass stock",
1630
                            "air",
1631
                            "non-urban air or from high stacks",
1632
                            "kilogram",
1633
                        )
1634
                    ],
1635
                ),
1636
                "categories": (
1637
                    "air",
1638
                    "non-urban air or from high stacks",
1639
                ),
1640
            }
1641
            dataset["exchanges"].append(land_use_co2_exc)
×
1642

1643
            string = (
×
1644
                f"{land_use_co2} kg of land use-induced CO2 has been added by premise, "
1645
                f"to be in line with the scenario {self.scenario} of {self.model.upper()} "
1646
                f"in {self.year} in the region {region}."
1647
            )
1648

1649
            if "comment" in dataset:
×
1650
                dataset["comment"] += string
×
1651
            else:
1652
                dataset["comment"] = string
×
1653

1654
            if "log parameters" not in dataset:
×
1655
                dataset["log parameters"] = {}
×
1656
            dataset["log parameters"].update(
×
1657
                {
1658
                    "land use CO2": land_use_co2,
1659
                }
1660
            )
1661

1662
        return dataset
×
1663

1664
    def adjust_biomass_conversion_efficiency(
1✔
1665
        self, dataset: dict, region: str, crop_type: str
1666
    ) -> dict:
1667
        """
1668
        Adjust biomass conversion efficiency.
1669

1670
        :param dataset: dataset to adjust
1671
        :param region: region of the dataset
1672
        :param crop_type: crop type of the dataset
1673
        :return: adjusted dataset
1674

1675
        """
1676

1677
        # Find variables with crop type and biofuel type keywords
1678
        crop_var = [
×
1679
            v
1680
            for v in self.iam_fuel_markets.variables.values.tolist()
1681
            if crop_type.lower() in v.lower()
1682
            and any(x.lower() in v.lower() for x in ["bioethanol", "biodiesel"])
1683
        ]
1684

1685
        if len(crop_var) == 0:
×
1686
            return dataset
×
1687
        else:
1688
            crop_var = crop_var[0]
×
1689

1690
        if crop_var in self.fuel_efficiencies.variables.values:
×
1691
            # Find scaling factor compared to 2020
1692
            scaling_factor = 1 / self.find_iam_efficiency_change(
×
1693
                data=self.fuel_efficiencies, variable=crop_var, location=region
1694
            )
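            # The IAM reports an efficiency change relative to 2020; the biomass
            # input is scaled by its inverse. For example, a 10% efficiency gain
            # (change factor 1.1) would scale the biomass input by 1 / 1.1 ≈ 0.91.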
1695

1696
            if "log parameters" not in dataset:
×
1697
                dataset["log parameters"] = {}
×
1698

1699
            if scaling_factor != 1:
×
1700
                # Rescale the biomass input according
1701
                # to the IAM efficiency values
1702
                for e in dataset["exchanges"]:
×
1703
                    if any(x in e["name"] for x in ["Farming", "Supply"]):
×
1704
                        dataset["log parameters"].update(
×
1705
                            {
1706
                                "initial biomass per kg biofuel": e["amount"],
1707
                                "final biomass per kg biofuel": e["amount"]
1708
                                * scaling_factor,
1709
                            }
1710
                        )
1711

1712
                        e["amount"] *= scaling_factor
×
1713

1714
                biomass_inputs = [
×
1715
                    ws.equals("unit", "kilogram"),
1716
                    ws.either(
1717
                        ws.contains("name", "Farming"), ws.contains("name", "Supply")
1718
                    ),
1719
                ]
1720
                wurst.change_exchanges_by_constant_factor(
×
1721
                    dataset, scaling_factor, biomass_inputs
1722
                )
1723

1724
                # Update dataset comment field
1725
                comment = f"The biomass input has been rescaled by premise by {(scaling_factor - 1) * 100:.0f}%.\n"
×
1726
                comment += f"To be in line with the scenario {self.scenario} of {self.model.upper()} in {self.year} in the region {region}.\n"
×
1727
                if "ethanol" in dataset["name"].lower():
×
1728
                    comment += "Bioethanol has a combustion CO2 emission factor of 1.91 kg CO2/kg."
×
1729
                if "biodiesel" in dataset["name"].lower():
×
1730
                    comment += "Biodiesel has a combustion CO2 emission factor of 2.85 kg CO2/kg."
×
1731
                dataset["comment"] = dataset.get("comment", "") + comment
×
1732

1733
        return dataset
×
1734

1735
    def get_production_label(self, crop_type: str) -> Union[str, None]:
1✔
1736
        """
1737
        Get the production label for the dataset.
1738
        """
1739
        try:
×
1740
            return [
×
1741
                i
1742
                for i in self.iam_fuel_markets.coords["variables"].values.tolist()
1743
                if crop_type.lower() in i.lower()
1744
            ][0]
1745
        except IndexError:
×
1746
            return None
×
1747

1748
    def should_adjust_land_use(self, dataset: dict, crop_type: str) -> bool:
1✔
1749
        """
1750
        Check if the dataset should be adjusted for land use.
1751
        """
1752

1753
        if self.iam_data.land_use is None:
×
1754
            return False
×
1755
        return (
×
1756
            any(i in dataset["name"].lower() for i in ("farming and supply",))
1757
            and crop_type.lower() in self.iam_data.land_use.variables.values
1758
            and not any(
1759
                i in dataset["name"].lower() for i in ["straw", "residue", "stover"]
1760
            )
1761
        )
1762

1763
    def should_adjust_land_use_change_emissions(
1✔
1764
        self, dataset: dict, crop_type: str
1765
    ) -> bool:
1766
        """
1767
        Check if the dataset should be adjusted for land use change emissions.
1768
        """
1769
        if self.iam_data.land_use_change is None:
×
1770
            return False
×
1771
        return (
×
1772
            any(i in dataset["name"].lower() for i in ("farming and supply",))
1773
            and crop_type.lower() in self.iam_data.land_use_change.variables.values
1774
            and not any(
1775
                i in dataset["name"].lower() for i in ["straw", "residue", "stover"]
1776
            )
1777
        )
1778

1779
    def generate_biofuel_activities(self):
1✔
1780
        """
1781
        Create region-specific biofuel datasets.
1782
        Update the conversion efficiency.
1783
        :return:
1784
        """
1785

1786
        # Map regions to their respective climate types
1787
        region_to_climate = fetch_mapping(REGION_CLIMATE_MAP)[self.model]
×
1788

1789
        # Map climate types to their respective crop types and crops
1790
        crop_types = list(self.crops_props.keys())
×
1791
        climates = set(region_to_climate.values())
×
1792
        climate_to_crop_type = {
×
1793
            clim: {
1794
                crop_type: self.crops_props[crop_type]["crop_type"][self.model][clim]
1795
                for crop_type in crop_types
1796
            }
1797
            for clim in climates
1798
        }
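        # climate_to_crop_type maps each climate zone to the crop grown there for each
        # crop type, e.g. (illustrative values only) {"temperate": {"sugar": "sugarbeet"},
        # "tropical": {"sugar": "sugarcane"}}, depending on the model's mapping file.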
1799

1800
        biofuel_activities = fetch_mapping(BIOFUEL_SOURCES)
×
1801

1802
        # List to store processed crops
1803
        processed_crops = []
×
1804

1805
        for climate in ["tropical", "temperate"]:
×
1806
            regions = [k for k, v in region_to_climate.items() if v == climate]
×
1807
            for crop_type in climate_to_crop_type[climate]:
×
1808
                specific_crop = climate_to_crop_type[climate][crop_type]
×
1809
                if specific_crop in processed_crops:
×
1810
                    continue
×
1811
                processed_crops.append(specific_crop)
×
1812

1813
                # Corn is grown in both climate zones, so corn-based datasets cover all regions
1814
                if specific_crop == "corn":
×
1815
                    regions = list(region_to_climate.keys())
×
1816

1817
                # Get list of activities for the crop and biofuel type
1818
                activities = biofuel_activities[crop_type][specific_crop]
×
1819

1820
                for activity in activities:
×
1821
                    # Fetch dataset for activity and regions
1822
                    new_datasets = self.fetch_proxies(
×
1823
                        name=activity,
1824
                        ref_prod=" ",
1825
                        production_variable=self.get_production_label(
1826
                            crop_type=crop_type
1827
                        ),
1828
                        regions=regions,
1829
                    )
1830

1831
                    # Adjust efficiency for fuel production activities
1832
                    new_datasets = {
×
1833
                        region: self.adjust_biomass_conversion_efficiency(
1834
                            dataset=ds,
1835
                            region=region,
1836
                            crop_type=crop_type,
1837
                        )
1838
                        if is_fuel_production(ds["name"])
1839
                        else ds
1840
                        for region, ds in new_datasets.items()
1841
                    }
1842

1843
                    # Adjust land use for farming activities
1844
                    new_datasets = {
×
1845
                        region: self.adjust_land_use(ds, region, crop_type)
1846
                        if self.should_adjust_land_use(ds, crop_type)
1847
                        else ds
1848
                        for region, ds in new_datasets.items()
1849
                    }
1850

1851
                    # Adjust land use change emissions for farming activities
1852
                    new_datasets = {
×
1853
                        region: self.adjust_land_use_change_emissions(
1854
                            ds, region, crop_type
1855
                        )
1856
                        if self.should_adjust_land_use_change_emissions(ds, crop_type)
1857
                        else ds
1858
                        for region, ds in new_datasets.items()
1859
                    }
1860

1861
                    self.database.extend(new_datasets.values())
×
1862

1863
                    # add to log
1864
                    for dataset in list(new_datasets.values()):
×
1865
                        self.write_log(dataset)
×
1866

1867
                        # add it to list of created datasets
1868
                        self.modified_datasets[(self.model, self.scenario, self.year)][
×
1869
                            "created"
1870
                        ].append(
1871
                            (
1872
                                dataset["name"],
1873
                                dataset["reference product"],
1874
                                dataset["location"],
1875
                                dataset["unit"],
1876
                            )
1877
                        )
1878

1879
    def get_fuel_mapping(self) -> dict:
1✔
1880
        """
1881
        Define filter functions that decide which wurst datasets to modify.
1882
        :return: dictionary that contains filters and functions
1883
        :rtype: dict
1884
        """
1885

1886
        return {
×
1887
            fuel: {
1888
                "find_share": self.fetch_fuel_share,
1889
                "fuel filters": self.fuel_map[fuel],
1890
            }
1891
            for fuel in self.iam_fuel_markets.variables.values
1892
        }
1893

1894
    @lru_cache(maxsize=None)
1✔
1895
    def fetch_fuel_share(
1✔
1896
        self, fuel: str, relevant_fuel_types: Tuple[str], region: str, period: int
1897
    ) -> float:
1898
        """
1899
        Return the share (between 0 and 1) of a specific fuel type in the fuel mix for a specific region.
        :param fuel: the name of the fuel to fetch the share for
        :param relevant_fuel_types: a tuple of relevant fuel types to include in the calculation
        :param region: the IAM region to fetch the data for
        :param period: the number of years over which the share is averaged
        :return: the share of the specified fuel type in the fuel mix for the region
1905
        """
1906

1907
        relevant_variables = [
×
1908
            v
1909
            for v in self.iam_fuel_markets.variables.values
1910
            if any(x.lower() in v.lower() for x in relevant_fuel_types)
1911
        ]
1912

1913
        fuel_share = (
×
1914
            (
1915
                self.iam_fuel_markets.sel(region=region, variables=fuel)
1916
                / self.iam_fuel_markets.sel(
1917
                    region=region, variables=relevant_variables
1918
                ).sum(dim="variables")
1919
            )
1920
            .interp(
1921
                year=np.arange(self.year, self.year + period + 1),
1922
                kwargs={"fill_value": "extrapolate"},
1923
            )
1924
            .mean(dim="year")
1925
            .values
1926
        )
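        # Illustrative example (hypothetical numbers): if bioethanol supplies 5 EJ
        # out of 50 EJ of all relevant liquid fuels in the region, the share is 0.10;
        # the ratio is averaged over `period` years starting from self.year.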
1927

1928
        if np.isnan(fuel_share):
×
1929
            print(f"Incorrect fuel share for {fuel} in {region}")
×
1930
            fuel_share = 0
×
1931

1932
        return float(fuel_share)
×
1933

1934
    def relink_activities_to_new_markets(self):
1✔
1935
        """
1936
        Links fuel input exchanges to new datasets
1937
        with the appropriate IAM location.
1938

1939
        Does not return anything.
1940
        """
1941

1942
        # Create set of activities that consume fuels
1943
        created_markets = list(set(x[0] for x in self.new_fuel_markets))
×
1944

1945
        # Get fuel markets and amounts
1946
        fuel_markets = fetch_mapping(FUEL_MARKETS)
×
1947

1948
        list_items_to_ignore = [
×
1949
            "blending",
1950
            "market group",
1951
            "lubricating oil production",
1952
            "petrol production",
1953
        ]
1954

1955
        old_fuel_inputs = ["market for " + x for x in list(fuel_markets.keys())] + [
×
1956
            "market for petrol, unleaded",
1957
            "market for diesel",
1958
        ]
1959
        old_fuel_inputs.extend(
×
1960
            ["market group for " + x for x in list(fuel_markets.keys())]
1961
        )
1962
        old_fuel_inputs.extend(
×
1963
            [
1964
                "market group for petrol, unleaded",
1965
                "market group for diesel",
1966
            ]
1967
        )
1968

1969
        # Iterate over datasets and update exchanges as necessary
1970
        for dataset in ws.get_many(
×
1971
            self.database,
1972
            ws.exclude(ws.either(*[ws.contains("name", x) for x in created_markets])),
1973
        ):
1974
            # Check that a fuel input exchange is present
1975
            # in the list of inputs
1976
            # Check also for "market group for" inputs
1977
            exchanges = list(
×
1978
                ws.technosphere(
1979
                    dataset,
1980
                    ws.either(*[ws.contains("name", x) for x in old_fuel_inputs]),
1981
                )
1982
            )
1983

1984
            if exchanges:
×
1985
                supplier_loc = (
×
1986
                    dataset["location"]
1987
                    if dataset["location"] in self.regions
1988
                    else self.geo.ecoinvent_to_iam_location(dataset["location"])
1989
                )
1990

1991
                amount_non_fossil_co2 = sum(
×
1992
                    a["amount"]
1993
                    * self.new_fuel_markets.get((a["name"], supplier_loc), {}).get(
1994
                        "non-fossil CO2", 0
1995
                    )
1996
                    for a in exchanges
1997
                )
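                # Illustrative example (hypothetical numbers): 0.2 kg of a fuel whose
                # market carries 0.5 kg non-fossil CO2 per kg fuel contributes
                # 0.2 * 0.5 = 0.1 kg to the amount passed to update_co2_emissions().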
1998

1999
                if amount_non_fossil_co2 > 0 and not any(
×
2000
                    x in dataset["name"].lower() for x in list_items_to_ignore
2001
                ):
2002
                    update_co2_emissions(
×
2003
                        dataset, amount_non_fossil_co2, self.biosphere_flows
2004
                    )
2005
                    self.write_log(dataset, status="updated")
×
2006

2007
    def generate_fuel_supply_chains(self):
1✔
2008
        """Duplicate fuel chains and make them IAM region-specific"""
2009

2010
        # hydrogen
2011
        # print("Generate region-specific hydrogen production pathways.")
2012
        self.generate_hydrogen_activities()
×
2013

2014
        # biogas
2015
        # print("Generate region-specific biogas and syngas supply chains.")
2016
        self.generate_biogas_activities()
×
2017

2018
        # synthetic fuels
2019
        # print("Generate region-specific synthetic fuel supply chains.")
2020
        self.generate_synthetic_fuel_activities()
×
2021

2022
        # biofuels
2023
        # print("Generate region-specific biofuel supply chains.")
2024
        self.generate_biofuel_activities()
×
2025

2026
    def generate_world_fuel_market(
1✔
2027
        self, dataset: dict, d_act: dict, prod_vars: list, period: int
2028
    ) -> dict:
2029
        """
2030
        Generate the world fuel market for a given dataset and product variables.
2031

2032
        :param dataset: The dataset for which to generate the world fuel market.
2033
        :param d_act: A dictionary of activity datasets, keyed by region.
2034
        :param prod_vars: A list of product variables.
2035
        :param period: number of years over which the fuel mix is averaged (0 = current year only).
        :return: The updated dataset with the world fuel market exchanges.
2036

2037

2038
        This function generates the world fuel market exchanges for a given dataset and set of product variables.
2039
        It first filters out non-production exchanges from the dataset, and then calculates the total production
2040
        volume for the world using the given product variables. For each region, it calculates the share of the
2041
        production volume and adds a technosphere exchange to the dataset with the appropriate share. It also
2042
        calculates the total LHV, fossil CO2, and biogenic CO2 emissions for each region. Finally, it returns the
        updated dataset with the world fuel market exchanges; the aggregated LHV, fossil CO2, and biogenic CO2
        values are stored under the dataset's `log parameters`.
2045

2046
        """
2047

2048
        if period != 0:
×
2049
            # this dataset is for a period of time
2050
            dataset["name"] += f", {period}-year period"
×
2051
            dataset["comment"] += (
×
2052
                f" Average fuel mix over a {period}"
2053
                f"-year period {self.year}-{self.year + period}."
2054
            )
2055
            for exc in ws.production(dataset):
×
2056
                exc["name"] += f", {period}-year period"
×
2057

2058
        # Filter out non-production exchanges
2059
        dataset["exchanges"] = [
×
2060
            e for e in dataset["exchanges"] if e["type"] == "production"
2061
        ]
2062

2063
        final_lhv, final_fossil_co2, final_biogenic_co2 = 0, 0, 0
×
2064

2065
        # Calculate share of production volume for each region
2066
        for r in d_act.keys():
×
2067
            if r == "World":
×
2068
                continue
×
2069

2070
            share = (
×
2071
                (
2072
                    self.iam_fuel_markets.sel(region=r, variables=prod_vars).sum(
2073
                        dim="variables"
2074
                    )
2075
                    / self.iam_fuel_markets.sel(
2076
                        variables=prod_vars,
2077
                        region=[
2078
                            x
2079
                            for x in self.iam_fuel_markets.region.values
2080
                            if x != "World"
2081
                        ],
2082
                    ).sum(dim=["variables", "region"])
2083
                )
2084
                .interp(
2085
                    year=np.arange(self.year, self.year + period + 1),
2086
                    kwargs={"fill_value": "extrapolate"},
2087
                )
2088
                .mean(dim="year")
2089
                .values
2090
            )
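            # Illustrative example (hypothetical numbers): if the region produces
            # 10 EJ of the pooled fuel variables and all non-World regions together
            # produce 100 EJ, the region receives a 0.10 share in the World market.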
2091

2092
            if np.isnan(share):
×
2093
                print("Incorrect market share for", dataset["name"], "in", r)
×
2094

2095
            # Calculate total LHV, fossil CO2, and biogenic CO2 for the region
2096
            fuel_market_key = (dataset["name"], r)
×
2097

2098
            # if key absent from self.new_fuel_markets, then it does not exist
2099

2100
            if fuel_market_key in self.new_fuel_markets and share > 0:
×
2101
                # Add exchange for the region
2102
                exchange = {
×
2103
                    "uncertainty type": 0,
2104
                    "amount": share,
2105
                    "type": "technosphere",
2106
                    "product": dataset["reference product"],
2107
                    "name": dataset["name"],
2108
                    "unit": dataset["unit"],
2109
                    "location": r,
2110
                }
2111
                dataset["exchanges"].append(exchange)
×
2112

2113
                lhv = self.new_fuel_markets[fuel_market_key]["LHV"]
×
2114
                co2_factor = self.new_fuel_markets[fuel_market_key]["fossil CO2"]
×
2115
                biogenic_co2_factor = self.new_fuel_markets[fuel_market_key][
×
2116
                    "non-fossil CO2"
2117
                ]
2118
                final_lhv += share * lhv
×
2119
                final_fossil_co2 += share * co2_factor
×
2120
                final_biogenic_co2 += share * biogenic_co2_factor
×
2121

2122
                dataset["log parameters"] = {}
×
2123
                dataset["log parameters"]["fossil CO2 per kg fuel"] = final_fossil_co2
×
2124
                dataset["log parameters"][
×
2125
                    "non-fossil CO2 per kg fuel"
2126
                ] = final_biogenic_co2
2127
                dataset["log parameters"]["lower heating value"] = final_lhv
×
2128

2129
        return dataset
×
2130

2131
    def generate_regional_fuel_market(
1✔
2132
        self,
2133
        dataset: dict,
2134
        fuel_providers: dict,
2135
        prod_vars: list,
2136
        vars_map: dict,
2137
        fuel_category: str,
2138
        region: str,
2139
        activity: dict,
2140
        period: int,
2141
    ) -> dict:
2142
        """
2143
        Generate regional fuel market for a given dataset and fuel providers.
2144

2145
        :param dataset: The dataset for which to generate the regional fuel market.
2146
        :param fuel_providers: A dictionary of fuel providers, keyed by product variable.
2147
        :param prod_vars: A list of product variables.
2148
        :param vars_map: A dictionary mapping product variables to fuel names.
2149
        :param fuel_category: The fuel name.
2150
        :param region: The region for which to generate the regional fuel market.
2151
        :param activity: The activity dataset for the region.
2152
        :param period: number of years over which the fuel mix is averaged (0 = current year only).
        :return: The updated dataset with the regional fuel market exchanges; the final LHV, fossil CO2,
        and non-fossil CO2 values are stored as dataset fields and under `log parameters`.
2154

2155
        """
2156

2157
        # Initialize variables
2158
        fossil_co2, non_fossil_co2, final_lhv = [0, 0, 0]
×
2159

2160
        if period != 0:
×
2161
            # this dataset is for a period of time
2162
            dataset["name"] += f", {period}-year period"
×
2163
            dataset["comment"] += (
×
2164
                f" Average fuel mix over a {period}"
2165
                f"-year period {self.year}-{self.year + period}."
2166
            )
2167
            for exc in ws.production(dataset):
×
2168
                exc["name"] += f", {period}-year period"
×
2169

2170
        # Remove existing fuel providers
2171
        dataset["exchanges"] = [
×
2172
            exc
2173
            for exc in dataset["exchanges"]
2174
            if exc["type"] != "technosphere"
2175
            or (
2176
                exc["product"] != dataset["reference product"]
2177
                and not any(
2178
                    x in exc["name"] for x in ["production", "evaporation", "import"]
2179
                )
2180
            )
2181
        ]
2182

2183
        string = ""
×
2184

2185
        for prod_var in prod_vars:
×
2186
            share = fuel_providers[prod_var]["find_share"](
×
2187
                prod_var, tuple(vars_map[fuel_category]), region, period
2188
            )
2189

2190
            if np.isnan(share) or share <= 0:
×
2191
                continue
×
2192

2193
            if isinstance(share, np.ndarray):
×
2194
                share = share.item(0)
×
2195

2196
            blacklist = [
×
2197
                "petroleum coke",
2198
                "petroleum gas",
2199
                "wax",
2200
                "low pressure",
2201
                "pressure, vehicle grade",
2202
                "burned",
2203
                "market",
2204
            ]
2205

2206
            if "natural gas" in dataset["name"]:
×
2207
                blacklist.remove("market")
×
2208
                blacklist.append("market for natural gas, high pressure")
×
2209

2210
            if "low-sulfur" in dataset["name"]:
×
2211
                blacklist.append("unleaded")
×
2212

2213
            possible_names = tuple(fuel_providers[prod_var]["fuel filters"])
×
2214

2215
            possible_suppliers = self.select_multiple_suppliers(
×
2216
                possible_names=possible_names,
2217
                dataset_location=dataset["location"],
2218
                look_for=tuple(vars_map[fuel_category]),
2219
                blacklist=tuple(blacklist),
2220
            )
2221

2222
            if not possible_suppliers:
×
2223
                print(
×
2224
                    f"No suppliers found for {prod_var} "
2225
                    f"in {region} for dataset "
2226
                    f"in location {dataset['location']}"
2227
                )
2228

2229
            for supplier_key, supplier_val in possible_suppliers.items():
×
2230
                # Convert m3 to kg (0.679 kg/m3) when the supplier unit differs from the market unit
2231
                conversion_factor = 0.679 if supplier_key[-1] != activity["unit"] else 1
×
2232

2233
                supplier_share = share * supplier_val
×
2234

2235
                # Calculate amount of fuel input
2236
                # Corrected by the LHV of the initial fuel
2237
                # so that the overall composition maintains
2238
                # the same average LHV
2239
                amount = (
×
2240
                    supplier_share
2241
                    * (activity["lhv"] / self.fuels_specs[prod_var]["lhv"])
2242
                    * conversion_factor
2243
                )
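                # Illustrative example (hypothetical numbers): a 0.10 supplier share of a
                # fuel with an LHV of 26.5 MJ/kg in a market defined at 42.6 MJ/kg gives
                # 0.10 * (42.6 / 26.5) ≈ 0.161 kg of that input per kg of market fuel
                # (times 0.679 kg/m3 if the supplier is delivered in m3).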
2244

2245
                lhv = self.fuels_specs[prod_var]["lhv"]
×
2246
                co2_factor = self.fuels_specs[prod_var]["co2"]
×
2247
                biogenic_co2_share = self.fuels_specs[prod_var]["biogenic_share"]
×
2248

2249
                fossil_co2, non_fossil_co2, weighted_lhv = calculate_fuel_properties(
×
2250
                    amount, lhv, co2_factor, biogenic_co2_share
2251
                )
2252

2253
                final_lhv += weighted_lhv
×
2254

2255
                dataset = update_dataset(dataset, supplier_key, amount)
×
2256

2257
                text = (
×
2258
                    f"{prod_var.capitalize()}: {(share * 100):.1f} pct @ "
2259
                    f"{self.fuels_specs[prod_var]['lhv']} MJ/kg. "
2260
                )
2261
                if text not in string:
×
2262
                    string += text
×
2263

2264
                if "log parameters" not in dataset:
×
2265
                    dataset["log parameters"] = {}
×
2266

2267
                if "fossil CO2 per kg fuel" not in dataset["log parameters"]:
×
2268
                    dataset["log parameters"]["fossil CO2 per kg fuel"] = fossil_co2
×
2269
                else:
2270
                    dataset["log parameters"]["fossil CO2 per kg fuel"] += fossil_co2
×
2271

2272
                if "non-fossil CO2 per kg fuel" not in dataset["log parameters"]:
×
2273
                    dataset["log parameters"][
×
2274
                        "non-fossil CO2 per kg fuel"
2275
                    ] = non_fossil_co2
2276
                else:
2277
                    dataset["log parameters"][
×
2278
                        "non-fossil CO2 per kg fuel"
2279
                    ] += non_fossil_co2
2280

2281
                if "lower heating value" not in dataset["log parameters"]:
×
2282
                    dataset["log parameters"]["lower heating value"] = weighted_lhv
×
2283
                else:
2284
                    dataset["log parameters"]["lower heating value"] += weighted_lhv
×
2285

2286
        string += f"Final average LHV of {final_lhv} MJ/kg."
×
2287

2288
        if "comment" in dataset:
×
2289
            dataset["comment"] += string
×
2290
        else:
2291
            dataset["comment"] = string
×
2292

2293
        # add two new fields: `fossil CO2` and `biogenic CO2`
2294
        dataset["fossil CO2"] = fossil_co2
×
2295
        dataset["non-fossil CO2"] = non_fossil_co2
×
2296
        dataset["LHV"] = final_lhv
×
2297

2298
        return dataset
×
2299

2300
    def generate_fuel_markets(self):
1✔
2301
        """
2302
        Create new fuel supply chains
2303
        and update existing fuel markets.
2304

2305
        """
2306

2307
        # Create new fuel supply chains
2308
        self.generate_fuel_supply_chains()
×
2309

2310
        # print("Generate new fuel markets.")
2311

2312
        # we start by creating region-specific "diesel, burned in" markets
2313
        new_datasets = []
×
2314

2315
        for dataset in ws.get_many(
×
2316
            self.database,
2317
            ws.contains("name", "diesel, burned in"),
2318
            ws.exclude(ws.contains("name", "market")),
2319
        ):
2320
            new_ds = self.fetch_proxies(
×
2321
                name=dataset["name"],
2322
                ref_prod=dataset["reference product"],
2323
                production_variable=self.fuel_groups["diesel"],
2324
            )
2325

2326
            # add to log
2327
            for new_dataset in list(new_ds.values()):
×
2328
                self.write_log(new_dataset)
×
2329

2330
                # add it to list of created datasets
2331
                self.modified_datasets[(self.model, self.scenario, self.year)][
×
2332
                    "created"
2333
                ].append(
2334
                    (
2335
                        new_dataset["name"],
2336
                        new_dataset["reference product"],
2337
                        new_dataset["location"],
2338
                        new_dataset["unit"],
2339
                    )
2340
                )
2341

2342
            new_datasets.extend(list(new_ds.values()))
×
2343

2344
        # add datasets to database
2345
        self.database.extend(new_datasets)
×
2346

2347
        new_datasets = []
×
2348

2349
        for dataset in ws.get_many(
×
2350
            self.database,
2351
            ws.contains("name", "market for diesel, burned in"),
2352
        ):
2353
            new_ds = self.fetch_proxies(
×
2354
                name=dataset["name"],
2355
                ref_prod=dataset["reference product"],
2356
                production_variable=self.fuel_groups["diesel"],
2357
            )
2358

2359
            # add to log
2360
            for new_dataset in list(new_ds.values()):
×
2361
                self.write_log(new_dataset)
×
2362

2363
                # add it to list of created datasets
2364
                self.modified_datasets[(self.model, self.scenario, self.year)][
×
2365
                    "created"
2366
                ].append(
2367
                    (
2368
                        new_dataset["name"],
2369
                        new_dataset["reference product"],
2370
                        new_dataset["location"],
2371
                        new_dataset["unit"],
2372
                    )
2373
                )
2374

2375
            new_datasets.extend(list(new_ds.values()))
×
2376

2377
        # add datasets to database
2378
        self.database.extend(new_datasets)
×
2379

2380
        fuel_markets = fetch_mapping(FUEL_MARKETS)
×
2381

2382
        # refresh the fuel filters
2383
        # as some have been created in the meanwhile
2384
        mapping = InventorySet(self.database)
×
2385
        self.fuel_map = mapping.generate_fuel_map()
×
2386
        d_fuels = self.get_fuel_mapping()
×
2387

2388
        vars_map = {
×
2389
            "petrol, low-sulfur": ["petrol", "ethanol", "methanol", "gasoline"],
2390
            "diesel, low-sulfur": ["diesel", "biodiesel"],
2391
            "natural gas": ["natural gas", "biomethane"],
2392
            "hydrogen": ["hydrogen"],
2393
        }
2394

2395
        new_datasets = []
×
2396

2397
        for fuel, activity in fuel_markets.items():
×
2398
            if [
×
2399
                i
2400
                for e in self.iam_fuel_markets.variables.values
2401
                for i in vars_map[fuel]
2402
                if i in e
2403
            ]:
2404
                # print(f"--> {fuel}")
2405

2406
                prod_vars = [
×
2407
                    v
2408
                    for v in self.iam_fuel_markets.variables.values
2409
                    if any(i.lower() in v.lower() for i in vars_map[fuel])
2410
                ]
2411

2412
                d_act = self.fetch_proxies(
×
2413
                    name=activity["name"],
2414
                    ref_prod=activity["reference product"],
2415
                    production_variable=prod_vars,
2416
                )
2417

2418
                if self.system_model == "consequential":
×
2419
                    periods = [
×
2420
                        0,
2421
                    ]
2422
                else:
2423
                    periods = [0, 20, 40, 60]
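                    # For non-consequential system models, markets are also created that
                    # average the fuel mix over the next 20, 40 and 60 years, e.g. a
                    # 20-year market built for 2030 averages the 2030-2050 supply shares.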
×
2424

2425
                for period in periods:
×
2426
                    for region, dataset in copy.deepcopy(d_act).items():
×
2427
                        for exc in ws.production(dataset):
×
2428
                            if "input" in exc:
×
2429
                                del exc["input"]
×
2430
                        if "input" in dataset:
×
2431
                            del dataset["input"]
×
2432
                        if "code" in dataset:
×
2433
                            dataset["code"] = str(uuid.uuid4().hex)
×
2434

2435
                        if region != "World":
×
2436
                            dataset = self.generate_regional_fuel_market(
×
2437
                                dataset=dataset,
2438
                                fuel_providers=d_fuels,
2439
                                prod_vars=prod_vars,
2440
                                vars_map=vars_map,
2441
                                fuel_category=fuel,
2442
                                region=region,
2443
                                activity=activity,
2444
                                period=period,
2445
                            )
2446

2447
                        else:
2448
                            # World dataset
2449
                            dataset = self.generate_world_fuel_market(
×
2450
                                dataset=dataset,
2451
                                d_act=d_act,
2452
                                prod_vars=prod_vars,
2453
                                period=period,
2454
                            )
2455

2456
                        # add fuel market to the dictionary
2457
                        if "log parameters" in dataset:
×
2458
                            self.new_fuel_markets.update(
×
2459
                                {
2460
                                    (dataset["name"], dataset["location"]): {
2461
                                        "fossil CO2": dataset["log parameters"][
2462
                                            "fossil CO2 per kg fuel"
2463
                                        ],
2464
                                        "non-fossil CO2": dataset["log parameters"][
2465
                                            "non-fossil CO2 per kg fuel"
2466
                                        ],
2467
                                        "LHV": dataset["log parameters"][
2468
                                            "lower heating value"
2469
                                        ],
2470
                                    }
2471
                                }
2472
                            )
2473

2474
                            # add to log
2475
                            self.write_log(dataset)
×
2476

2477
                            # add it to list of created datasets
2478
                            self.modified_datasets[
×
2479
                                (self.model, self.scenario, self.year)
2480
                            ]["created"].append(
2481
                                (
2482
                                    dataset["name"],
2483
                                    dataset["reference product"],
2484
                                    dataset["location"],
2485
                                    dataset["unit"],
2486
                                )
2487
                            )
2488

2489
                            new_datasets.append(dataset)
×
2490

2491
        # add to database
2492
        self.database.extend(new_datasets)
×
2493

2494
        # list `market group for diesel` as "emptied"
2495
        datasets_to_empty = {
×
2496
            "market group for diesel": (
2497
                "market for diesel, low-sulfur",
2498
                "diesel, low-sulfur",
2499
            ),
2500
            "market group for diesel, low-sulfur": (
2501
                "market for diesel, low-sulfur",
2502
                "diesel, low-sulfur",
2503
            ),
2504
            "market for petrol, unleaded": (
2505
                "market for petrol, low-sulfur",
2506
                "petrol, low-sulfur",
2507
            ),
2508
            "market for diesel": (
2509
                "market for diesel, low-sulfur",
2510
                "diesel, low-sulfur",
2511
            ),
2512
        }
2513

2514
        for old_ds, new_ds in datasets_to_empty.items():
×
2515
            for ds in ws.get_many(self.database, ws.equals("name", old_ds)):
×
2516
                self.modified_datasets[(self.model, self.scenario, self.year)][
×
2517
                    "emptied"
2518
                ].append(
2519
                    (ds["name"], ds["reference product"], ds["location"], ds["unit"])
2520
                )
2521

2522
                ds["exchanges"] = [
×
2523
                    e for e in ds["exchanges"] if e["type"] == "production"
2524
                ]
2525
                ds["exchanges"].append(
×
2526
                    {
2527
                        "name": new_ds[0],
2528
                        "product": new_ds[1],
2529
                        "type": "technosphere",
2530
                        "amount": 1.0,
2531
                        "unit": ds["unit"],
2532
                        "location": self.ecoinvent_to_iam_loc[ds["location"]],
2533
                    }
2534
                )
2535

2536
        for ds in ws.get_many(
×
2537
            self.database,
2538
            ws.exclude(ws.either(*[ws.equals("name", i) for i in datasets_to_empty])),
2539
        ):
2540
            for exc in ws.technosphere(
×
2541
                ds,
2542
                ws.either(*[ws.equals("name", i) for i in datasets_to_empty]),
2543
            ):
2544
                new_supplier = datasets_to_empty[exc["name"]]
×
2545
                exc["name"] = new_supplier[0]
×
2546
                exc["product"] = new_supplier[1]
×
2547
                exc["location"] = (
×
2548
                    ds["location"]
2549
                    if ds["location"] in self.regions
2550
                    else self.ecoinvent_to_iam_loc[ds["location"]]
2551
                )
2552

2553
        self.relink_activities_to_new_markets()
×
2554
        print("Done!")
×
2555

2556
    def write_log(self, dataset, status="created"):
1✔
2557
        """
2558
        Write log file.
2559
        """
2560

2561
        logger.info(
×
2562
            f"{status}|{self.model}|{self.scenario}|{self.year}|"
2563
            f"{dataset['name']}|{dataset['location']}|"
2564
            f"{dataset.get('log parameters', {}).get('initial amount of fossil CO2', '')}|"
2565
            f"{dataset.get('log parameters', {}).get('new amount of fossil CO2', '')}|"
2566
            f"{dataset.get('log parameters', {}).get('new amount of biogenic CO2', '')}|"
2567
            f"{dataset.get('log parameters', {}).get('initial energy input for hydrogen production', '')}|"
2568
            f"{dataset.get('log parameters', {}).get('new energy input for hydrogen production', '')}|"
2569
            f"{dataset.get('log parameters', {}).get('hydrogen distribution losses', '')}|"
2570
            f"{dataset.get('log parameters', {}).get('electricity for hydrogen compression', '')}|"
2571
            f"{dataset.get('log parameters', {}).get('electricity for hydrogen compression after dehydrogenation', '')}|"
2572
            f"{dataset.get('log parameters', {}).get('electricity for hydrogen pre-cooling', '')}|"
2573
            f"{dataset.get('log parameters', {}).get('initial biomass per kg biofuel', '')}|"
2574
            f"{dataset.get('log parameters', {}).get('final biomass per kg biofuel', '')}|"
2575
            f"{dataset.get('log parameters', {}).get('land footprint', '')}|"
2576
            f"{dataset.get('log parameters', {}).get('land use CO2', '')}|"
2577
            f"{dataset.get('log parameters', {}).get('fossil CO2 per kg fuel', '')}|"
2578
            f"{dataset.get('log parameters', {}).get('non-fossil CO2 per kg fuel', '')}|"
2579
            f"{dataset.get('log parameters', {}).get('lower heating value', '')}"
2580
        )