KPI Availability Forecast Curtailment Events

KpiAvailabilityForecastCurtailmentEvents(perfdb)

Class used for handling power forecast curtailment events. Can be accessed via perfdb.kpis.availability.forecasts.curtailment_events.
Parameters:
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
def __init__(self, perfdb: e_pg.PerfDB) -> None:
    """Handler for power forecast curtailment events.

    Reachable through `perfdb.kpis.availability.forecasts.curtailment_events`.

    Parameters
    ----------
    perfdb : PerfDB
        Top level object carrying all functionality and the connection handler.
    """
    super().__init__(perfdb)
    # Imported lazily to avoid a circular import at module load time.
    from .kpi_availability_forecast_curtailment_event_assets import KpiAvailabilityForecastCurtailmentEventAssets

    # Sub-handler for the per-event asset links.
    self.assets = KpiAvailabilityForecastCurtailmentEventAssets(perfdb)
delete(ids)
¶
Deletes power forecast curtailment events by ID.
Note: linked assets (in power_forecast_curtailment_event_assets) are cascade-deleted.
Parameters:

- ids (list[int]) – List of curtailment event IDs to delete.

Returns:

- int – Number of rows deleted.
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def delete(self, ids: list[int]) -> int:
    """Deletes power forecast curtailment events by ID.

    Note: linked assets (in power_forecast_curtailment_event_assets) are cascade-deleted.

    Parameters
    ----------
    ids : list[int]
        List of curtailment event IDs to delete.

    Returns
    -------
    int
        Number of rows deleted.
    """
    # Inline the ID list as a SQL literal array and match with ANY.
    stmt = sql.SQL("DELETE FROM performance.power_forecast_curtailment_events WHERE id = ANY({ids})").format(
        ids=sql.Literal(ids),
    )
    self._perfdb.conn.execute(stmt)
    deleted = self._perfdb.conn.rowcount
    logger.debug(f"Deleted {deleted} curtailment event(s).")
    return deleted
get(ids=None, revision_names=None, period=None, filter_type='and', output_type='pl.DataFrame')
¶
Gets power forecast curtailment events with full details.
The most useful keys/columns returned are:
- id
- revision_name
- description
- curtailment_percentage
- power_fraction_available
- start_time
- end_time
- duration_hours
- duration_days
Parameters:

- ids (list[int] | None, default: None) – List of event IDs to filter. By default None.
- revision_names (list[str] | None, default: None) – List of revision names to filter. By default None.
- period (DateTimeRange | None, default: None) – Time range; returns events that overlap with this range. By default None.
- filter_type (Literal['and', 'or'], default: 'and') – How to treat multiple filters. By default "and".
- output_type (Literal['dict', 'DataFrame', 'pl.DataFrame'], default: 'pl.DataFrame') – Output type. By default "pl.DataFrame".

Returns:

- dict[int, dict[str, Any]] – In case output_type is "dict", returns {id: {col: val, ...}, ...}.
- DataFrame – In case output_type is "DataFrame", returns a pandas DataFrame indexed by id.
- pl.DataFrame – In case output_type is "pl.DataFrame", returns a Polars DataFrame.
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def get(
    self,
    ids: list[int] | None = None,
    revision_names: list[str] | None = None,
    period: DateTimeRange | None = None,
    filter_type: Literal["and", "or"] = "and",
    output_type: Literal["dict", "DataFrame", "pl.DataFrame"] = "pl.DataFrame",
) -> dict[int, dict[str, Any]] | pd.DataFrame | pl.DataFrame:
    """Gets power forecast curtailment events with full details.

    The most useful keys/columns returned are:

    - id
    - revision_name
    - description
    - curtailment_percentage
    - power_fraction_available
    - start_time
    - end_time
    - duration_hours
    - duration_days

    Parameters
    ----------
    ids : list[int] | None, optional
        List of event IDs to filter. By default None.
    revision_names : list[str] | None, optional
        List of revision names to filter. By default None.
    period : DateTimeRange | None, optional
        Time range; returns events that overlap with this range. By default None.
    filter_type : Literal["and", "or"], optional
        How to treat multiple filters. By default "and".
    output_type : Literal["dict", "DataFrame", "pl.DataFrame"], optional
        Output type. By default "pl.DataFrame".

    Returns
    -------
    dict[int, dict[str, Any]]
        In case output_type is "dict", returns {id: {col: val, ...}, ...}.
    pd.DataFrame
        In case output_type is "DataFrame", returns a pandas DataFrame indexed by id.
    pl.DataFrame
        In case output_type is "pl.DataFrame", returns a Polars DataFrame.
    """
    # Shared argument validation/WHERE-clause construction with get_ids.
    where_clause = self._check_get_args(ids, revision_names, period, filter_type)
    stmt = sql.SQL(
        "SELECT * FROM performance.v_power_forecast_curtailment_events {where} ORDER BY revision_name, start_time",
    ).format(where=where_clause)
    events_df = self._perfdb.conn.read_to_polars(stmt, schema_overrides=self._cols_schema)
    # Convert to the caller-requested container, keyed/indexed by id.
    return convert_output(events_df, output_type, index_col="id")
get_defaults(years=None, months=None, object_names=None, revision_names=None, filter_type='and', output_type='pl.DataFrame')
¶
Gets the per-asset / per-month resolution of the curtailment events that are the default for each (asset, year, month) according to availability_forecast_revision_assignments.
Backed by performance.v_power_forecast_defaults_curtailment_events. Emits one row per
(asset, year, month, event) where the asset's revision assignment for that month points to the
event's revision and the event's [start_time, end_time] overlaps the calendar month.
curtailment_percentage is a fraction (0..1); month_impact is the calendar-month-relative
fraction (0..1) weighted by curtailment_percentage.
Parameters:

- years (list[int] | None, default: None) – List of years to filter. By default None.
- months (list[int] | None, default: None) – List of months (1-12) to filter. By default None.
- object_names (list[str] | None, default: None) – List of object names to filter. By default None.
- revision_names (list[str] | None, default: None) – List of revision names to filter. By default None.
- filter_type (Literal['and', 'or'], default: 'and') – How to treat multiple filters. By default "and".
- output_type (Literal['dict', 'DataFrame', 'pl.DataFrame'], default: 'pl.DataFrame') – Output type. By default "pl.DataFrame".

Returns:

- dict[Any, dict[str, Any]] – In case output_type is "dict", returns {(object_name, year, month, record_id): {col: val, ...}, ...}.
- DataFrame – In case output_type is "DataFrame", returns a pandas DataFrame with MultiIndex (object_name, year, month, record_id).
- pl.DataFrame – In case output_type is "pl.DataFrame", returns a Polars DataFrame.
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def get_defaults(
    self,
    years: list[int] | None = None,
    months: list[int] | None = None,
    object_names: list[str] | None = None,
    revision_names: list[str] | None = None,
    filter_type: Literal["and", "or"] = "and",
    output_type: Literal["dict", "DataFrame", "pl.DataFrame"] = "pl.DataFrame",
) -> dict[Any, dict[str, Any]] | pd.DataFrame | pl.DataFrame:
    """Gets the per-asset / per-month resolution of the curtailment events that are the default for each
    (asset, year, month) according to availability_forecast_revision_assignments.

    Backed by ``performance.v_power_forecast_defaults_curtailment_events``. Emits one row per
    (asset, year, month, event) where the asset's revision assignment for that month points to the
    event's revision and the event's [start_time, end_time] overlaps the calendar month.

    ``curtailment_percentage`` is a fraction (0..1); ``month_impact`` is the calendar-month-relative
    fraction (0..1) weighted by curtailment_percentage.

    Parameters
    ----------
    years : list[int] | None, optional
        List of years to filter. By default None.
    months : list[int] | None, optional
        List of months (1-12) to filter. By default None.
    object_names : list[str] | None, optional
        List of object names to filter. By default None.
    revision_names : list[str] | None, optional
        List of revision names to filter. By default None.
    filter_type : Literal["and", "or"], optional
        How to treat multiple filters. By default "and".
    output_type : Literal["dict", "DataFrame", "pl.DataFrame"], optional
        Output type. By default "pl.DataFrame".

    Returns
    -------
    dict[Any, dict[str, Any]]
        In case output_type is "dict", returns {(object_name, year, month, record_id): {col: val, ...}, ...}.
    pd.DataFrame
        In case output_type is "DataFrame", returns a pandas DataFrame with MultiIndex
        (object_name, year, month, record_id).
    pl.DataFrame
        In case output_type is "pl.DataFrame", returns a Polars DataFrame.
    """
    # Fold every provided filter into a single WHERE clause.
    builder = WhereClauseBuilder(filter_type=filter_type)
    for column, values in (
        ("year", years),
        ("month", months),
        ("object_name", object_names),
        ("revision_name", revision_names),
    ):
        builder = builder.add_any(column, values)
    where = builder.build()
    stmt = sql.SQL(
        "SELECT * FROM performance.v_power_forecast_defaults_curtailment_events {where} "
        "ORDER BY object_name, year, month, record_id",
    ).format(where=where)
    defaults_df = self._perfdb.conn.read_to_polars(stmt, schema_overrides=self._defaults_cols_schema)
    return convert_output(defaults_df, output_type, index_col=["object_name", "year", "month", "record_id"])
get_ids(revision_names=None, period=None, filter_type='and')
¶
Gets IDs of power forecast curtailment events.
Parameters:

- revision_names (list[str] | None, default: None) – List of revision names to filter. By default None.
- period (DateTimeRange | None, default: None) – Time range; returns events that overlap with this range. By default None.
- filter_type (Literal['and', 'or'], default: 'and') – How to treat multiple filters. By default "and".

Returns:

- list[int] – List of curtailment event IDs.
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def get_ids(
    self,
    revision_names: list[str] | None = None,
    period: DateTimeRange | None = None,
    filter_type: Literal["and", "or"] = "and",
) -> list[int]:
    """Gets IDs of power forecast curtailment events.

    Parameters
    ----------
    revision_names : list[str] | None, optional
        List of revision names to filter. By default None.
    period : DateTimeRange | None, optional
        Time range; returns events that overlap with this range. By default None.
    filter_type : Literal["and", "or"], optional
        How to treat multiple filters. By default "and".

    Returns
    -------
    list[int]
        List of curtailment event IDs.
    """
    # Same validation as get(), with no ID filter applied.
    where_clause = self._check_get_args(None, revision_names, period, filter_type)
    stmt = sql.SQL(
        "SELECT id FROM performance.v_power_forecast_curtailment_events {where} ORDER BY id",
    ).format(where=where_clause)
    ids_df = self._perfdb.conn.read_to_polars(stmt, schema_overrides=self._cols_schema)
    return ids_df["id"].to_list()
insert(revision_name=None, description=None, curtailment_percentage=None, start_time=None, end_time=None, user_name=None, data_df=None, on_conflict='raise')
¶
Inserts one or more power forecast curtailment events.
You can pass individual values to insert a single event, or a DataFrame for batch insert.
Parameters:

- revision_name (str | None, default: None) – Name of the forecast revision this event belongs to. Required for single insert. By default None.
- description (str | None, default: None) – Human-readable explanation of the curtailment. Required for single insert. By default None.
- curtailment_percentage (float | None, default: None) – Curtailment level as a percentage (must be in range (0, 90]). Required for single insert. By default None.
- start_time (datetime | None, default: None) – Planned start timestamp. Required for single insert. By default None.
- end_time (datetime | None, default: None) – Planned end timestamp (must be after start_time). Required for single insert. By default None.
- user_name (str | None, default: None) – Name of the user creating the event. Required for single insert. By default None.
- data_df (DataFrame | None, default: None) – DataFrame for batch insert. Required columns: revision_name, description, curtailment_percentage, start_time, end_time, user_name. When provided, individual parameters are ignored. By default None.
- on_conflict (Literal['raise', 'ignore'], default: 'raise') – Behavior on unexpected conflict. By default "raise".

Returns:

- int | list[int] | None – ID(s) of the inserted event(s).
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def insert(
    self,
    revision_name: str | None = None,
    description: str | None = None,
    curtailment_percentage: float | None = None,
    start_time: datetime | None = None,
    end_time: datetime | None = None,
    user_name: str | None = None,
    data_df: pl.DataFrame | None = None,
    on_conflict: Literal["raise", "ignore"] = "raise",
) -> int | list[int] | None:
    """Inserts one or more power forecast curtailment events.

    You can pass individual values to insert a single event, or a DataFrame for batch insert.

    Parameters
    ----------
    revision_name : str | None, optional
        Name of the forecast revision this event belongs to. Required for single insert. By default None.
    description : str | None, optional
        Human-readable explanation of the curtailment. Required for single insert. By default None.
    curtailment_percentage : float | None, optional
        Curtailment level as a percentage (must be in range (0, 90]). Required for single insert. By default None.
    start_time : datetime | None, optional
        Planned start timestamp. Required for single insert. By default None.
    end_time : datetime | None, optional
        Planned end timestamp (must be after start_time). Required for single insert. By default None.
    user_name : str | None, optional
        Name of the user creating the event. Required for single insert. By default None.
    data_df : pl.DataFrame | None, optional
        DataFrame for batch insert. Required columns: revision_name, description,
        curtailment_percentage, start_time, end_time, user_name. When provided, individual
        parameters are ignored. By default None.
    on_conflict : Literal["raise", "ignore"], optional
        Behavior on unexpected conflict. By default "raise".

    Returns
    -------
    int | list[int] | None
        ID of the inserted event for a single insert, or list of IDs for a batch insert.

    Raises
    ------
    ValueError
        If a required column is missing or null, if curtailment_percentage falls outside
        (0, 90], or if a referenced revision or user name does not exist.
    """
    # Canonical dtypes for the insert payload; timestamps use millisecond precision.
    df_schema = {
        "revision_name": pl.Utf8,
        "description": pl.Utf8,
        "curtailment_percentage": pl.Float64,
        "start_time": pl.Datetime("ms"),
        "end_time": pl.Datetime("ms"),
        "user_name": pl.Utf8,
    }
    if data_df is None:
        # Single-event path: wrap the scalar arguments into a one-row frame so the
        # rest of the method is a single code path.
        single_insert = True
        data_df = pl.DataFrame(
            {
                "revision_name": [revision_name],
                "description": [description],
                "curtailment_percentage": [curtailment_percentage],
                "start_time": [start_time],
                "end_time": [end_time],
                "user_name": [user_name],
            },
            schema=df_schema,
        )
    else:
        # Batch path: coerce only the columns the caller actually provided.
        single_insert = False
        data_df = data_df.cast({c: t for c, t in df_schema.items() if c in data_df.columns})
    # All six columns are mandatory and must be fully populated (this also catches
    # missing scalar arguments on the single-insert path, which arrive as nulls).
    required_cols = ["revision_name", "description", "curtailment_percentage", "start_time", "end_time", "user_name"]
    for col in required_cols:
        if col not in data_df.columns:
            raise ValueError(f"data_df is missing required column '{col}'.")
        if data_df[col].is_null().any():
            raise ValueError(f"Column '{col}' contains null values but is required.")
    # Validate curtailment_percentage range: must lie in (0, 90].
    invalid_pct = data_df.filter((pl.col("curtailment_percentage") <= 0) | (pl.col("curtailment_percentage") > 90))
    if len(invalid_pct) > 0:
        raise ValueError("'curtailment_percentage' must be in range (0, 90].")
    # Resolve revision_name -> revision_id; fail loudly on unknown revisions
    # (replace_strict would otherwise raise a less descriptive error).
    rev_names = data_df["revision_name"].unique().to_list()
    rev_ids = self._perfdb.kpis.availability.forecasts.revisions.get_ids(names=rev_names)
    if missing := set(rev_names) - set(rev_ids.keys()):
        raise ValueError(f"Revisions not found: {missing}")
    data_df = data_df.with_columns(pl.col("revision_name").replace_strict(rev_ids, return_dtype=pl.Int64).alias("revision_id"))
    data_df = data_df.drop("revision_name")
    # Resolve user_name -> user_id the same way.
    user_names = data_df["user_name"].unique().to_list()
    user_ids = self._perfdb.users.instances.get_ids(names=user_names)
    if missing := set(user_names) - set(user_ids.keys()):
        raise ValueError(f"Users not found: {missing}")
    data_df = data_df.with_columns(pl.col("user_name").replace_strict(user_ids, return_dtype=pl.Int64).alias("user_id"))
    data_df = data_df.drop("user_name")
    # NOTE(review): on_conflict maps to polars_to_sql's if_exists modes
    # ("append" vs "skip_row_check"); exact conflict semantics live in that helper.
    if_exists = "append" if on_conflict == "ignore" else "skip_row_check"
    ids_df = self._perfdb.conn.polars_to_sql(
        df=data_df,
        table_name="power_forecast_curtailment_events",
        schema="performance",
        return_cols=["id"],
        if_exists=if_exists,
    )
    ids = ids_df["id"].to_list()
    logger.debug(f"Inserted {len(ids)} curtailment event(s): {ids}")
    # Unwrap to a scalar ID when the caller used the single-insert path.
    return ids[0] if single_insert else ids
update(event_id=None, description=None, curtailment_percentage=None, start_time=None, end_time=None, user_name=None, data_df=None)
¶
Updates one or more power forecast curtailment events.
You can pass individual values to update a single event, or a DataFrame for batch update.
Parameters:

- event_id (int | None, default: None) – ID of the event to update. Required for single update. By default None.
- description (str | None, default: None) – New description. By default None.
- curtailment_percentage (float | None, default: None) – New curtailment percentage (must be in range (0, 90]). By default None.
- start_time (datetime | None, default: None) – New planned start timestamp. By default None.
- end_time (datetime | None, default: None) – New planned end timestamp. By default None.
- user_name (str | None, default: None) – Name of the user performing the update. By default None.
- data_df (DataFrame | None, default: None) – DataFrame for batch update. Required column: id. Optional: description, curtailment_percentage, start_time, end_time, user_name. When provided, individual parameters are ignored. By default None.
Source code in echo_postgres/kpi_availability_forecast_curtailment_events.py
@validate_call
def update(
    self,
    event_id: int | None = None,
    description: str | None = None,
    curtailment_percentage: float | None = None,
    start_time: datetime | None = None,
    end_time: datetime | None = None,
    user_name: str | None = None,
    data_df: pl.DataFrame | None = None,
) -> None:
    """Updates one or more power forecast curtailment events.

    You can pass individual values to update a single event, or a DataFrame for batch update.
    For a single update, parameters left as None are not written (only provided fields change).

    Parameters
    ----------
    event_id : int | None, optional
        ID of the event to update. Required for single update. By default None.
    description : str | None, optional
        New description. By default None.
    curtailment_percentage : float | None, optional
        New curtailment percentage (must be in range (0, 90]). By default None.
    start_time : datetime | None, optional
        New planned start timestamp. By default None.
    end_time : datetime | None, optional
        New planned end timestamp. By default None.
    user_name : str | None, optional
        Name of the user performing the update. By default None.
    data_df : pl.DataFrame | None, optional
        DataFrame for batch update. Required column: id. Optional: description,
        curtailment_percentage, start_time, end_time, user_name. When provided, individual
        parameters are ignored. By default None.

    Raises
    ------
    ValueError
        If 'id' is missing/null, if curtailment_percentage falls outside (0, 90],
        or if a referenced event ID or user name does not exist.
    """
    # Canonical dtypes for the update payload; timestamps use millisecond precision.
    df_schema = {
        "id": pl.Int64,
        "description": pl.Utf8,
        "curtailment_percentage": pl.Float64,
        "start_time": pl.Datetime("ms"),
        "end_time": pl.Datetime("ms"),
        "user_name": pl.Utf8,
    }
    if data_df is None:
        # Single-event path: wrap the scalar arguments into a one-row frame.
        single_update = True
        data_df = pl.DataFrame(
            {
                "id": [event_id],
                "description": [description],
                "curtailment_percentage": [curtailment_percentage],
                "start_time": [start_time],
                "end_time": [end_time],
                "user_name": [user_name],
            },
            schema=df_schema,
        )
    else:
        # Batch path: coerce only the columns the caller actually provided.
        single_update = False
        data_df = data_df.cast({c: t for c, t in df_schema.items() if c in data_df.columns})
    if "id" not in data_df.columns or data_df["id"].is_null().any():
        raise ValueError("'id' column is required and cannot contain nulls.")
    # Validate curtailment_percentage range if provided; nulls (unset values) are allowed.
    if "curtailment_percentage" in data_df.columns:
        invalid_pct = data_df.filter(
            pl.col("curtailment_percentage").is_not_null()
            & ((pl.col("curtailment_percentage") <= 0) | (pl.col("curtailment_percentage") > 90)),
        )
        if len(invalid_pct) > 0:
            raise ValueError("'curtailment_percentage' must be in range (0, 90].")
    # Verify every target ID exists before attempting the update.
    existing = self._perfdb.conn.read_to_polars(
        sql.SQL("SELECT id FROM performance.power_forecast_curtailment_events WHERE id = ANY({ids})").format(
            ids=sql.Literal(data_df["id"].to_list()),
        ),
        schema_overrides={"id": pl.Int64},
    )
    if missing := set(data_df["id"].to_list()) - set(existing["id"].to_list()):
        raise ValueError(f"Curtailment event IDs not found: {missing}")
    # Resolve user_name -> user_id if provided; null user_name rows map to a null user_id.
    if "user_name" in data_df.columns:
        user_names = data_df["user_name"].drop_nulls().unique().to_list()
        if user_names:
            user_ids = self._perfdb.users.instances.get_ids(names=user_names)
            if missing := set(user_names) - set(user_ids.keys()):
                raise ValueError(f"Users not found: {missing}")
            data_df = data_df.with_columns(
                pl.col("user_name").replace_strict(user_ids, return_dtype=pl.Int64, default=None).alias("user_id"),
            )
        data_df = data_df.drop("user_name")
    # Upsert keyed on id. ignore_null_cols=single_update: on the single-update path,
    # null columns are skipped so unset parameters don't overwrite existing values
    # (batch frames write every column they contain) — behavior defined by polars_to_sql.
    self._perfdb.conn.polars_to_sql(
        df=data_df,
        table_name="power_forecast_curtailment_events",
        schema="performance",
        conflict_cols=["id"],
        if_exists="update_only",
        ignore_null_cols=single_update,
    )
    logger.debug(f"Updated {len(data_df)} curtailment event(s).")