26 changes: 12 additions & 14 deletions config/test/config.validationember.yaml
@@ -22,29 +22,27 @@ scenario:


snapshots:
start: "2023-01-01"
end: "2024-01-01"
start: "2013-01-01"
end: "2013-01-31"

countries: ['IT', 'AT', 'CH', 'DE']

enable:
  validate_ember: true

atlite:
  cutout_directory: cutouts
-  default_cutout: europe-2023-sarah3-era5
+  default_cutout: europe-jan-2023-sarah3-era5


Suggested change:
-  default_cutout: europe-jan-2023-sarah3-era5
+  default_cutout: europe-jan-2013-sarah3-era5

  nprocesses: 4
  show_progress: true
  cutouts:
    # use 'base' to determine geographical bounds and time span from config
    # base:
    #   module: era5
-    europe-2023-sarah3-era5:
+    europe-jan-2023-sarah3-era5:


Suggested change:
-    europe-jan-2023-sarah3-era5:
+    europe-jan-2013-sarah3-era5:

      module: [sarah, era5]  # in priority order
      x: [-12., 42.]
      y: [33., 72.]
      dx: 0.3
      dy: 0.3
-      time: ['2023', '2023']
+      time: ["2013-01-01", "2013-01-31"]

lines:
  under_construction: remove

@@ -77,7 +75,7 @@ electricity:
      Offshore: offwind-ac
      Onshore: onwind
      PV: solar

load:
  fill_gaps:
    enable: true
Expand Down Expand Up @@ -109,7 +107,7 @@ costs:
  year: 2025

ember_settings:
  ntc: false
  ntc_cross_country_pf_restriction: false
  ramping: false
  nuclear_decommissioning: false
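For orientation: both suggestions above chase the same consistency requirement, namely that `default_cutout` names a key under `atlite: cutouts:` and that the cutout's `time` range covers the `snapshots` window. Under the reviewer's proposed 2013 naming (suggested, not yet applied in this diff), a consistent pairing would look roughly like:

snapshots:
  start: "2013-01-01"
  end: "2013-01-31"

atlite:
  default_cutout: europe-jan-2013-sarah3-era5
  cutouts:
    europe-jan-2013-sarah3-era5:
      module: [sarah, era5]
      time: ["2013-01-01", "2013-01-31"]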
47 changes: 29 additions & 18 deletions rules/emberdata.smk
@@ -2,24 +2,29 @@
#
# SPDX-License-Identifier: MIT
import time

# rules/emberdata.smk

from pathlib import Path

import requests

DOWNLOADS = {
-    Path("validation", "ember_data", "yearly_full_release_long_format.csv"):
-        "https://storage.googleapis.com/emb-prod-bkt-publicdata/public-downloads/yearly_full_release_long_format.csv",
-    Path("validation", "ember_data", "europe_monthly_full_release_long_format.csv"):
-        "https://storage.googleapis.com/emb-prod-bkt-publicdata/public-downloads/europe_monthly_full_release_long_format.csv",
-    Path("validation", "entsoe_data", "physical_energy_power_flows_2023.csv"):
-        "https://www.entsoe.eu/publications/data/power-stats/2023/physical_energy_power_flows_2023.csv"
+    Path(
+        "validation", "ember_data", "yearly_full_release_long_format.csv"
+    ): "https://storage.googleapis.com/emb-prod-bkt-publicdata/public-downloads/yearly_full_release_long_format.csv",
+    Path(
+        "validation", "ember_data", "europe_monthly_full_release_long_format.csv"
+    ): "https://storage.googleapis.com/emb-prod-bkt-publicdata/public-downloads/europe_monthly_full_release_long_format.csv",
+    Path(
+        "validation", "entsoe_data", "physical_energy_power_flows_2023.csv"
+    ): "https://www.entsoe.eu/publications/data/power-stats/2023/physical_energy_power_flows_2023.csv",
}


rule download_ember_data:
    output:
-        [str(path) for path in DOWNLOADS.keys()]
+        [str(path) for path in DOWNLOADS.keys()],

if those changes are not in upstream, I'd prefer to keep them to avoid unnecessary merge conflicts later.

but you're welcome to make this suggestion upstream :)

    run:
        import urllib.request
        import yaml

@@ -41,63 +46,69 @@ rule download_ember_data:
logger.info(f"Downloading {url} -> {filepath}")
response = requests.get(url)
response.raise_for_status() # Raise an error for non-200 responses
with open(filepath,"wb") as f:
with open(filepath, "wb") as f:
f.write(response.content)

# Confirm file creation
# Confirm file creation
while not filepath.exists():
logger.info(f"Waiting for {filepath} to appear...")
time.sleep(1)

else:
logger.info(f"Already exists: {filepath}")


rule download_ember_NTC_data:
    output:
-        file="validation/ember_data/Reg_NTC"
+        file="validation/ember_data/Reg_NTC",
    shell:
        """
        gdown https://drive.google.com/uc?id=1GTo4UrI_X9ZCsgtM4KobO_pw-TTrEoDy -O {output.file}
        """


rule download_eurostat:
    output:
-        "validation/eurostatdata/eurostat_nrg_bal_c_2023.csv"
+        "validation/eurostatdata/eurostat_nrg_bal_c_2023.csv",
    shell:
        """
        curl -L "https://ec.europa.eu/eurostat/api/dissemination/sdmx/2.1/data/nrg_bal_c?format=SDMX-CSV&startPeriod=2023&endPeriod=2023&lang=en&geo=EU27_2020&unit=KTOE" -o {output}
        """


rule download_jrc_idees:
    output:
-        "validation/eurostatdata/JRC-IDEES-2021_EU27.zip"
+        "validation/eurostatdata/JRC-IDEES-2021_EU27.zip",
    shell:
        """
        curl -L "https://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/JRC-IDEES/JRC-IDEES-2021_v1/JRC-IDEES-2021_EU27.zip" -o {output}
        """


rule download_hotmaps:
    output:
-        "validation/eurostatdata/Industrial_Database.csv"
+        "validation/eurostatdata/Industrial_Database.csv",
    shell:
        """
        curl -L "https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database/-/raw/master/data/Industrial_Database.csv" -o {output}
        """


rule hourly_lignite_prices:
    input:
-        "validation/ember_data/hourly_fuel_costs.csv"
+        "validation/ember_data/hourly_fuel_costs.csv",
    output:
-        resources("hourly_fuel_costs_with_lignite.csv")
+        resources("hourly_fuel_costs_with_lignite.csv"),
    script:
        "../scripts/hourly_lignite.py"


rule extract_jrc_idees:
    input:
-        "validation/eurostatdata/JRC-IDEES-2021_EU27.zip"
+        "validation/eurostatdata/JRC-IDEES-2021_EU27.zip",
    output:
-        directory("validation/eurostatdata/jrc_idees/")
+        directory("validation/eurostatdata/jrc_idees/"),
    shell:
        """
        unzip {input} -d {output}
        """
46 changes: 31 additions & 15 deletions rules/retrieve.smk
@@ -9,6 +9,8 @@ from shutil import move, unpack_archive
from shutil import copy2 as shcopy2
from zipfile import ZipFile

+from snakemake.io import temp
+
if config["enable"].get("retrieve", "auto") == "auto":
    config["enable"]["retrieve"] = has_internet_access()

@@ -157,21 +159,35 @@ if config["enable"]["retrieve"]:

if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True):

-    rule retrieve_cutout:
-        input:
-            storage(
-                "https://zenodo.org/records/15349674/files/{cutout}.nc",
-            ),
-        output:
-            CDIR.joinpath("{cutout}.nc").as_posix(),
-        log:
-            Path("logs").joinpath(CDIR, "retrieve_cutout_{cutout}.log").as_posix(),
-        resources:
-            mem_mb=5000,
-        retries: 2
-        run:
-            move(input[0], output[0])
-            validate_checksum(output[0], input[0])
+    if config["enable"].get("validate_ember", False):
+
+        rule retrieve_cutout_test:
+            output:
+                CDIR.joinpath("{cutout}.nc").as_posix(),
+                temp(CDIR.joinpath("{cutout}.nc.status").as_posix())
+            shell:
+                """
+                gdown --id 16HrlB5FejyB4uE5hG04tAxWh6tFVM6xL -O {output[0]}
+                echo $? > {output[1]}
+                """
+
+    else:
+
+        rule retrieve_cutout:
+            input:
+                storage(
+                    "https://zenodo.org/records/15349674/files/{cutout}.nc",
+                ),
+            output:
+                CDIR.joinpath("{cutout}.nc").as_posix(),
+            log:
+                Path("logs").joinpath(CDIR, "retrieve_cutout_{cutout}.log").as_posix(),
+            resources:
+                mem_mb=5000,
+            retries: 2
+            run:
+                move(input[0], output[0])
+                validate_checksum(output[0], input[0])


if config["enable"]["retrieve"]:
5 changes: 2 additions & 3 deletions scripts/add_electricity.py
@@ -61,8 +61,6 @@
import xarray as xr
from pypsa.clustering.spatial import DEFAULT_ONE_PORT_STRATEGIES, normed_or_uniform

-from scripts.apply_ntcs import apply_ntc
-
from scripts._helpers import (
    PYPSA_V1,
    configure_logging,
@@ -71,6 +69,7 @@
    set_scenario_config,
    update_p_nom_max,
)
+from scripts.apply_ntcs import apply_ntc

if PYPSA_V1:
    pypsa.options.params.add.return_names = True
@@ -615,7 +614,7 @@ def attach_wind_and_solar(
        caps = ppl.query("carrier == @car").groupby("bus").p_nom.sum()
        caps = pd.Series(
            data=ds.indexes["bus"].get_level_values("bus").map(caps),
-            index=ds.indexes["bus"]
+            index=ds.indexes["bus"],


Suggested change:
-            index=ds.indexes["bus"],
+            index=ds.indexes["bus"]

        ).fillna(0)
    else:
        caps = pd.Series(index=ds.indexes["bus"]).fillna(0)
63 changes: 34 additions & 29 deletions scripts/apply_ntcs.py
@@ -1,49 +1,54 @@
import pandas as pd


def apply_ntc(n, ntc_file, year=2025):
    # Load NTC CSV
    df = pd.read_csv(ntc_file)

    # Get unique years and sort
-    available_years = sorted(df['Year'].unique())
+    available_years = sorted(df["Year"].unique())

    # Function to get NTC for a border at a specific year
    def get_ntc(border, y, direction):
-        row = df[(df['Border'] == border) & (df['Year'] == y)]
+        row = df[(df["Border"] == border) & (df["Year"] == y)]
        if not row.empty:
            return row.iloc[0][direction]
        return 0

    # If year exactly matches, filter directly
    if year in available_years:
-        ntc_df = df[df['Year'] == year]
+        ntc_df = df[df["Year"] == year]
    else:
        # Find lower and upper bounds
        lower = max([y for y in available_years if y <= year] or [available_years[0]])
        upper = min([y for y in available_years if y >= year] or [available_years[-1]])

        if lower == upper:
-            ntc_df = df[df['Year'] == lower]
+            ntc_df = df[df["Year"] == lower]
        else:
            # Interpolate
            factor = (year - lower) / (upper - lower)
-            ntc_df = df[df['Year'] == lower].copy() # Base on lower
+            ntc_df = df[df["Year"] == lower].copy()  # Base on lower
            for i, row in ntc_df.iterrows():
-                border = row['Border']
-                ntc_f_lower = row['NTC_F']
-                ntc_b_lower = row['NTC_B']
-                ntc_f_upper = get_ntc(border, upper, 'NTC_F')
-                ntc_b_upper = get_ntc(border, upper, 'NTC_B')
-                ntc_df.at[i, 'NTC_F'] = ntc_f_lower + factor * (ntc_f_upper - ntc_f_lower)
-                ntc_df.at[i, 'NTC_B'] = ntc_b_lower + factor * (ntc_b_upper - ntc_b_lower)
+                border = row["Border"]
+                ntc_f_lower = row["NTC_F"]
+                ntc_b_lower = row["NTC_B"]
+                ntc_f_upper = get_ntc(border, upper, "NTC_F")
+                ntc_b_upper = get_ntc(border, upper, "NTC_B")
+                ntc_df.at[i, "NTC_F"] = ntc_f_lower + factor * (
+                    ntc_f_upper - ntc_f_lower
+                )
+                ntc_df.at[i, "NTC_B"] = ntc_b_lower + factor * (
+                    ntc_b_upper - ntc_b_lower
+                )

    # Load the network
    # n = pypsa.Network(snakemake.input.network)

    # Function to identify cross-border components
    def is_cross_border(component):
-        bus0_country = n.buses.at[component.bus0, 'country']
-        bus1_country = n.buses.at[component.bus1, 'country']
+        bus0_country = n.buses.at[component.bus0, "country"]
+        bus1_country = n.buses.at[component.bus1, "country"]
        return bus0_country != bus1_country

    # Remove existing cross-border lines and links
@@ -55,24 +60,24 @@ def is_cross_border(component):

    # Add a single bidirectional link for each border using one NTC configuration
    for _, row in ntc_df.iterrows():
-        from_country = row['From']
-        to_country = row['To']
-        ntc_f = row['NTC_F']
-        ntc_b = row['NTC_B']
-        from_buses = n.buses[n.buses['country'] == from_country].index
-        to_buses = n.buses[n.buses['country'] == to_country].index
+        from_country = row["From"]
+        to_country = row["To"]
+        ntc_f = row["NTC_F"]
+        ntc_b = row["NTC_B"]
+
+        from_buses = n.buses[n.buses["country"] == from_country].index
+        to_buses = n.buses[n.buses["country"] == to_country].index

        if from_buses.empty or to_buses.empty:
            print(f"Skipping {from_country}-{to_country}: Country not in network.")
            continue

        from_bus = from_buses[0]
        to_bus = to_buses[0]

        larger = max(ntc_f, ntc_b)
        smaller = min(ntc_f, ntc_b)

        if larger > 0:
            if ntc_f >= ntc_b:
                bus0 = from_bus
@@ -82,9 +87,9 @@ def is_cross_border(component):
                bus0 = to_bus
                bus1 = from_bus
                direction = f"{to_country} -> {from_country}"

            p_min_pu = -smaller / larger if larger > 0 else 0

            n.add(
                "Link",
                f"{direction} NTC {year}",
@@ -97,5 +102,5 @@ def is_cross_border(component):
carrier="DC",
capital_cost=0,
length=1,
p_nom_extendable=False
p_nom_extendable=False,
)
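For context on how this module is meant to be used: `apply_ntc` mutates the network in place, dropping existing cross-border lines and links and adding one aggregated DC link per border, with reverse flow allowed through `p_min_pu = -smaller / larger`. A minimal usage sketch, assuming the `Reg_NTC` file fetched by `download_ember_NTC_data` is the CSV with the `Border`, `From`, `To`, `Year`, `NTC_F` and `NTC_B` columns read above (the network paths are hypothetical):

import pypsa

from scripts.apply_ntcs import apply_ntc

# Hypothetical input network, for illustration only.
n = pypsa.Network("resources/networks/base.nc")

# Replaces cross-border lines/links with one NTC link per border,
# linearly interpolating the NTC values to the requested year.
apply_ntc(n, "validation/ember_data/Reg_NTC", year=2025)

n.export_to_netcdf("resources/networks/base_with_ntc.nc")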