Add new daemons and debug scripts for Sigenergy and Oracle functionalities

- Implement `sigen_daemon.py` to poll Sigenergy plant metrics and store snapshots.
- Create `web_daemon.py` for serving a web interface with various endpoints.
- Add debug scripts:
  - `debug_duplicates.py` to find duplicate target times in forecast data.
  - `debug_energy_forecast.py` to print baseline energy forecast curves.
  - `debug_oracle_evaluations.py` to run the oracle evaluator.
  - `debug_sigen.py` to inspect stored Sigenergy plant snapshots.
  - `debug_weather.py` to trace resolved truth data.
  - `modbus_test.py` for exploring Sigenergy plants or inverters over Modbus TCP.
- Introduce `oracle_evaluator.py` for evaluating stored oracle predictions against actuals.
- Add TCN training scripts in `tcn` directory for training usage sequence models.
This commit is contained in:
rpotter6298
2026-04-28 08:14:00 +02:00
parent ff0c65a794
commit c8e3016fd6
55 changed files with 6385 additions and 633 deletions
+102
View File
@@ -0,0 +1,102 @@
from __future__ import annotations
import argparse
from datetime import timedelta
from gibil.classes.env_loader import EnvLoader
from gibil.classes.oracle.store import OracleStore
def main() -> None:
    """CLI entry point: evaluate due oracle forecasts and/or print a quality summary.

    Loads environment config, parses CLI flags, then drives the OracleStore:
    ``--evaluate`` scores stored predictions whose target time has passed;
    ``--summary`` prints per-bucket accuracy rows.
    """
    EnvLoader().load()
    options = parse_args()
    store = OracleStore.from_env()

    if options.evaluate:
        n_evaluated = store.evaluate_due_forecasts(
            actual_window=timedelta(minutes=options.actual_window_minutes),
            lookback=timedelta(hours=options.lookback_hours),
            limit=options.limit,
        )
        print(f"evaluated_oracle_forecasts={n_evaluated}")

    if options.summary:
        summary_rows = store.load_evaluation_summary(
            lookback=timedelta(hours=options.lookback_hours)
        )
        print(f"oracle_evaluation_summary_rows={len(summary_rows)}")
        for summary in summary_rows:
            # One space-separated key=value line per kind/model/horizon bucket.
            fields = [
                f"kind={summary['kind']}",
                f"model={summary['model_version']}",
                f"horizon={summary.get('horizon_label') or _format_horizon(summary)}",
                f"n={summary['evaluated_count']}",
                f"bias={_format_w(summary['mean_error_w'])}",
                f"mae={_format_w(summary['mean_absolute_error_w'])}",
                f"median_ae={_format_w(summary['median_absolute_error_w'])}",
                f"mape={_format_pct(summary['mean_absolute_pct_error'])}",
                f"coverage={_format_pct(summary['interval_coverage'])}",
            ]
            print(" ".join(fields))
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Evaluate stored Astrape oracle predictions against Sigen actuals."
)
parser.add_argument(
"--evaluate",
action="store_true",
help="Evaluate stored predictions whose target time has passed.",
)
parser.add_argument(
"--summary",
action="store_true",
help="Print evaluation quality by kind/model/horizon bucket.",
)
parser.add_argument(
"--actual-window-minutes",
type=float,
default=5,
help="Minutes after each target timestamp to average as realized actuals.",
)
parser.add_argument(
"--lookback-hours",
type=float,
default=168,
help="Only evaluate/summarize predictions with target times this recent.",
)
parser.add_argument(
"--limit",
type=int,
default=1000,
help="Maximum unevaluated predictions to process.",
)
args = parser.parse_args()
if not args.evaluate and not args.summary:
args.evaluate = True
args.summary = True
return args
def _format_w(value: object) -> str:
if value is None:
return "n/a"
return f"{float(value):.0f}W"
def _format_horizon(row: dict[str, object]) -> str:
return f"{row['min_horizon_minutes']}-{row['max_horizon_minutes']}m"
def _format_pct(value: object) -> str:
if value is None:
return "n/a"
return f"{float(value) * 100:.1f}%"
if __name__ == "__main__":
main()