From e57bd11d9e3aefacef38ba484d4f56ff0a0aa2ac Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Mon, 4 May 2026 17:25:25 +0200 Subject: [PATCH 1/7] Add evaluation scripts --- backend/scripts/plot_evaluation.py | 362 ++++++++++++++++++ backend/scripts/run_evaluation.py | 574 +++++++++++++++++++++++++++++ 2 files changed, 936 insertions(+) create mode 100644 backend/scripts/plot_evaluation.py create mode 100644 backend/scripts/run_evaluation.py diff --git a/backend/scripts/plot_evaluation.py b/backend/scripts/plot_evaluation.py new file mode 100644 index 0000000..f63aabc --- /dev/null +++ b/backend/scripts/plot_evaluation.py @@ -0,0 +1,362 @@ +"""Generate evaluation summary tables and figures from CSV output. + +Run from the backend folder after run_evaluation.py: + + uv run python scripts/plot_evaluation.py --input evaluation-output --figures ../paper/figures + +The script writes summary CSV files and vector PDF figures. +""" + +from __future__ import annotations + +import argparse +import csv +import math +import statistics +from collections import defaultdict +from pathlib import Path +from typing import Any + +import matplotlib.pyplot as plt + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + _ = parser.add_argument("--input", type=Path, default=Path("evaluation-output")) + _ = parser.add_argument( + "--figures", type=Path, default=Path("evaluation-output/figures") + ) + return parser.parse_args() + + +def read_csv(path: Path) -> list[dict[str, str]]: + with path.open(newline="", encoding="utf-8") as file: + return list(csv.DictReader(file)) + + +def as_float(row: dict[str, str], key: str, default: float = math.nan) -> float: + value = row.get(key, "") + if value == "": + return default + return float(value) + + +def percentile(values: list[float], q: float) -> float: + if not values: + return math.nan + sorted_values = sorted(values) + index = (len(sorted_values) - 1) * q + lower = math.floor(index) + upper = math.ceil(index) + 
if lower == upper: + return sorted_values[int(index)] + return sorted_values[lower] * (upper - index) + sorted_values[upper] * ( + index - lower + ) + + +def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: + if not rows: + return + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("w", newline="", encoding="utf-8") as file: + writer = csv.DictWriter(file, fieldnames=list(rows[0].keys())) + writer.writeheader() + writer.writerows(rows) + + +def shortest_by_pair(routes: list[dict[str, str]]) -> dict[str, dict[str, str]]: + result = {} + for row in routes: + if row["method"] == "shortest" and row["route_index"] == "0": + result[row["pair_id"]] = row + return result + + +def add_quality_fields(routes: list[dict[str, str]]) -> list[dict[str, Any]]: + shortest = shortest_by_pair(routes) + enriched = [] + for row in routes: + base = shortest.get(row["pair_id"]) + if base is None: + continue + distance = as_float(row, "distance_m") + base_distance = as_float(base, "distance_m") + distance_overhead_pct = ( + ((distance / base_distance) - 1.0) * 100.0 + if base_distance > 0 + else math.nan + ) + score_gains = [ + as_float(row, "snow_free_score") - as_float(base, "snow_free_score"), + as_float(row, "flat_score") - as_float(base, "flat_score"), + as_float(row, "scenic_score") - as_float(base, "scenic_score"), + ] + enriched.append( + { + **row, + "distance_overhead_pct": distance_overhead_pct, + "max_score_gain_pp": max(score_gains), + "snow_free_gain_pp": score_gains[0], + "flat_gain_pp": score_gains[1], + "scenic_gain_pp": score_gains[2], + } + ) + return enriched + + +def summarize_runs(runs: list[dict[str, str]]) -> list[dict[str, Any]]: + by_method: dict[str, list[dict[str, str]]] = defaultdict(list) + for row in runs: + if row["success"] == "True": + by_method[row["method"]].append(row) + + rows = [] + for method, group in sorted(by_method.items()): + runtimes = [as_float(row, "runtime_ms") for row in group] + route_counts = [as_float(row, 
"route_count") for row in group] + total_labels = [ + as_float(row, "total_labels") for row in group if row["total_labels"] + ] + destination_labels = [ + as_float(row, "destination_labels") + for row in group + if row["destination_labels"] + ] + cap_hits = [ + row.get("hit_total_label_cap") == "True" + for row in group + if row.get("hit_total_label_cap", "") + ] + rows.append( + { + "method": method, + "requests": len(group), + "median_runtime_ms": round(statistics.median(runtimes), 2), + "p95_runtime_ms": round(percentile(runtimes, 0.95), 2), + "max_runtime_ms": round(max(runtimes), 2), + "median_route_count": round(statistics.median(route_counts), 2), + "median_total_labels": round(statistics.median(total_labels), 2) + if total_labels + else "", + "median_destination_labels": round( + statistics.median(destination_labels), 2 + ) + if destination_labels + else "", + "hit_label_cap_pct": round(100.0 * statistics.mean(cap_hits), 2) + if cap_hits + else "", + } + ) + return rows + + +def summarize_quality( + enriched: list[dict[str, Any]], *, include_neutral: bool +) -> list[dict[str, Any]]: + by_method: dict[str, list[dict[str, Any]]] = defaultdict(list) + for row in enriched: + if row["method"] == "shortest": + continue + if not include_neutral and row["profile"] == "neutral": + continue + by_method[row["method"]].append(row) + + rows = [] + for method, group in sorted(by_method.items()): + overhead = [float(row["distance_overhead_pct"]) for row in group] + gain = [float(row["max_score_gain_pp"]) for row in group] + rows.append( + { + "method": method, + "profile_scope": "all_profiles" + if include_neutral + else "preference_profiles", + "route_rows": len(group), + "median_distance_overhead_pct": round(statistics.median(overhead), 2), + "p95_distance_overhead_pct": round(percentile(overhead, 0.95), 2), + "median_max_score_gain_pp": round(statistics.median(gain), 2), + "p95_max_score_gain_pp": round(percentile(gain, 0.95), 2), + } + ) + return rows + + +def 
summarize_quality_by_profile( + enriched: list[dict[str, Any]], +) -> list[dict[str, Any]]: + by_group: dict[tuple[str, str], list[dict[str, Any]]] = defaultdict(list) + for row in enriched: + if row["method"] != "shortest": + by_group[(row["method"], row["profile"])].append(row) + + rows = [] + for (method, profile), group in sorted(by_group.items()): + overhead = [float(row["distance_overhead_pct"]) for row in group] + gain = [float(row["max_score_gain_pp"]) for row in group] + rows.append( + { + "method": method, + "profile": profile, + "route_rows": len(group), + "median_distance_overhead_pct": round(statistics.median(overhead), 2), + "p95_distance_overhead_pct": round(percentile(overhead, 0.95), 2), + "median_max_score_gain_pp": round(statistics.median(gain), 2), + "p95_max_score_gain_pp": round(percentile(gain, 0.95), 2), + } + ) + return rows + + +def summarize_sensitivity(routes: list[dict[str, str]]) -> list[dict[str, Any]]: + # Top route only, because this is what the UI recommends by default. 
+ groups: dict[tuple[str, str], dict[str, str]] = defaultdict(dict) + for row in routes: + if row["method"] == "shortest" or row["route_index"] != "0": + continue + groups[(row["method"], row["pair_id"])][row["profile"]] = row["signature"] + + by_method: dict[str, list[int]] = defaultdict(list) + changed_from_neutral: dict[str, list[int]] = defaultdict(list) + for (method, _pair_id), signatures_by_profile in groups.items(): + signatures = set(signatures_by_profile.values()) + by_method[method].append(len(signatures)) + neutral = signatures_by_profile.get("neutral") + if neutral is not None: + changed = any( + signature != neutral + for profile, signature in signatures_by_profile.items() + if profile != "neutral" + ) + changed_from_neutral[method].append(1 if changed else 0) + + rows = [] + for method in sorted(by_method): + unique_counts = by_method[method] + changed_counts = changed_from_neutral[method] + rows.append( + { + "method": method, + "pairs": len(unique_counts), + "mean_unique_top_routes": round(statistics.mean(unique_counts), 2), + "median_unique_top_routes": round(statistics.median(unique_counts), 2), + "share_changed_from_neutral_pct": round( + 100.0 * statistics.mean(changed_counts), 2 + ) + if changed_counts + else "", + } + ) + return rows + + +def plot_runtime(runs: list[dict[str, str]], output: Path) -> None: + methods = ["shortest", "weighted", "pareto"] + data = [ + [ + as_float(row, "runtime_ms") + for row in runs + if row["method"] == method and row["success"] == "True" + ] + for method in methods + ] + _ = plt.figure(figsize=(3.35, 2.35)) + _ = plt.boxplot(data, tick_labels=methods, showfliers=False) + _ = plt.ylabel("Runtime (ms, log scale)") + plt.yscale("log") + plt.tight_layout() + plt.savefig(output) + plt.close() + + +def plot_detour_gain(enriched: list[dict[str, Any]], output: Path) -> None: + _ = plt.figure(figsize=(3.35, 2.35)) + for method, marker in [("weighted", "o"), ("pareto", "x")]: + group = [ + row + for row in enriched + if 
row["method"] == method and row["profile"] != "neutral" + ] + _ = plt.scatter( + [float(row["distance_overhead_pct"]) for row in group], + [float(row["max_score_gain_pp"]) for row in group], + marker=marker, + s=16, + alpha=0.75, + label=method, + ) + _ = plt.axhline(0, linewidth=0.8) + _ = plt.axvline(0, linewidth=0.8) + _ = plt.xlabel("Distance overhead vs. shortest (%)") + _ = plt.ylabel("Best score gain (pp)") + _ = plt.legend(frameon=False) + plt.tight_layout() + plt.savefig(output) + plt.close() + + +def plot_sensitivity(summary: list[dict[str, Any]], output: Path) -> None: + methods = [row["method"] for row in summary] + values = [float(row["mean_unique_top_routes"]) for row in summary] + plt.figure(figsize=(3.35, 2.20)) + plt.bar(methods, values) + plt.ylabel("Mean unique top routes") + plt.ylim(0, max(values + [1]) + 0.5) + plt.tight_layout() + plt.savefig(output) + plt.close() + + +def plot_pareto_labels(runs: list[dict[str, str]], output: Path) -> None: + labels = [ + as_float(row, "total_labels") / 1000.0 + for row in runs + if row["method"] == "pareto" and row["total_labels"] + ] + if not labels: + return + _ = plt.figure(figsize=(3.35, 2.20)) + _ = plt.hist(labels, bins=12) + _ = plt.xlabel("Labels generated (thousands)") + _ = plt.ylabel("Requests") + plt.tight_layout() + plt.savefig(output) + plt.close() + + +def main() -> None: + args = parse_args() + args.figures.mkdir(parents=True, exist_ok=True) + + runs = read_csv(args.input / "runs.csv") + routes = read_csv(args.input / "routes.csv") + enriched = add_quality_fields(routes) + + run_summary = summarize_runs(runs) + quality_summary = summarize_quality(enriched, include_neutral=True) + quality_preference_summary = summarize_quality(enriched, include_neutral=False) + quality_by_profile = summarize_quality_by_profile(enriched) + sensitivity_summary = summarize_sensitivity(routes) + + write_csv(args.input / "runtime_summary.csv", run_summary) + write_csv(args.input / "route_quality_summary.csv", 
quality_summary) + write_csv( + args.input / "route_quality_preference_summary.csv", quality_preference_summary + ) + write_csv(args.input / "route_quality_by_profile.csv", quality_by_profile) + write_csv(args.input / "sensitivity_summary.csv", sensitivity_summary) + write_csv(args.input / "routes_enriched.csv", enriched) + + plot_runtime(runs, args.figures / "evaluation_runtime_boxplot.pdf") + plot_detour_gain(enriched, args.figures / "evaluation_detour_gain_scatter.pdf") + plot_sensitivity( + sensitivity_summary, args.figures / "evaluation_weight_sensitivity.pdf" + ) + plot_pareto_labels(runs, args.figures / "evaluation_pareto_labels_histogram.pdf") + + print("Wrote summary CSV files and figures to", args.input, "and", args.figures) + + +if __name__ == "__main__": + main() diff --git a/backend/scripts/run_evaluation.py b/backend/scripts/run_evaluation.py new file mode 100644 index 0000000..7495b5d --- /dev/null +++ b/backend/scripts/run_evaluation.py @@ -0,0 +1,574 @@ +"""Run routing evaluation experiments and export CSV data. + +Place this file in backend/scripts/run_evaluation.py and run it from the backend +folder, for example: + + uv run python scripts/run_evaluation.py --pairs 20 --seed 7 --travel-mode cycling \ + --min-distance 2000 --max-distance 8000 --pair-filter active + +The script loads the same graph state as the FastAPI app, samples +origin-destination pairs from graph nodes, runs shortest, weighted, and Pareto +routing, and writes CSV files to evaluation-output/. + +The script intentionally calls the route-planning layer directly instead of the +HTTP endpoint. This measures server-side routing and serialization without +browser caching, geocoding, or front-end rendering noise. 
+""" + +from __future__ import annotations + +import argparse +import csv +import hashlib +import json +import random +import statistics +import sys +import time +from dataclasses import dataclass +from itertools import pairwise +from pathlib import Path +from typing import TYPE_CHECKING, Any +from unittest.mock import patch + +import networkx as nx + +# Make `app` importable when the script is placed in backend/scripts. +BACKEND_DIR = Path(__file__).resolve().parents[1] +if str(BACKEND_DIR) not in sys.path: + sys.path.insert(0, str(BACKEND_DIR)) + +from app import route_planner as route_planner_module # noqa: E402 +from app.costs import ( # noqa: E402 + build_weighted_edge_cost_function, + compute_edge_cost_components, + normalize_route_preference_weights, +) +from app.graph_state import ( # noqa: E402 + GRAPH_STATE, + get_graph_for_travel_mode, + load_graph_state, +) +from app.main import OVERLAY_DIRECTORY, PLACE_NAME # noqa: E402 +from app.models import ( # noqa: E402 + RouteCoordinates, + RoutePlanningOptions, + RoutePreferenceWeights, +) + +if TYPE_CHECKING: + from collections.abc import Iterable + + +@dataclass(frozen=True, slots=True) +class Pair: + pair_id: int + origin_node: int + destination_node: int + origin_longitude: float + origin_latitude: float + destination_longitude: float + destination_latitude: float + shortest_distance_m: float + shortest_snow_penalty: float + shortest_uphill_penalty: float + shortest_scenic_penalty: float + shortest_snow_free_score: float + shortest_flat_score: float + shortest_scenic_score: float + + +@dataclass(frozen=True, slots=True) +class WeightProfile: + name: str + scenic_weight: int + snow_free_weight: int + flat_weight: int + + +WEIGHT_PROFILES: tuple[WeightProfile, ...] 
= ( + WeightProfile("neutral", scenic_weight=0, snow_free_weight=0, flat_weight=0), + WeightProfile("scenic", scenic_weight=100, snow_free_weight=0, flat_weight=0), + WeightProfile("snow_free", scenic_weight=0, snow_free_weight=100, flat_weight=0), + WeightProfile("flat", scenic_weight=0, snow_free_weight=0, flat_weight=100), + WeightProfile("balanced", scenic_weight=50, snow_free_weight=50, flat_weight=50), +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + _ = parser.add_argument("--pairs", type=int, default=20) + _ = parser.add_argument("--seed", type=int, default=7) + _ = parser.add_argument( + "--travel-mode", choices=["walking", "cycling"], default="cycling" + ) + _ = parser.add_argument("--min-distance", type=float, default=1_000.0) + _ = parser.add_argument("--max-distance", type=float, default=5_000.0) + _ = parser.add_argument("--max-sampling-attempts", type=int, default=20_000) + _ = parser.add_argument( + "--pair-filter", + choices=["random", "active", "changed"], + default="random", + help=( + "random accepts any pair within the distance interval; active requires " + "the shortest path to cross at least one non-default overlay; changed is " + "a diagnostic filter requiring at least one weighted profile to change " + "the scalar shortest path. Use active for the thesis benchmark." 
+ ), + ) + _ = parser.add_argument("--active-epsilon", type=float, default=1e-6) + _ = parser.add_argument("--pareto-max-routes", type=int, default=3) + _ = parser.add_argument("--pareto-max-labels-per-node", type=int, default=40) + _ = parser.add_argument("--pareto-max-total-labels", type=int, default=50_000) + _ = parser.add_argument("--out", type=Path, default=Path("evaluation-output")) + _ = parser.add_argument( + "--profiles", + default=",".join(profile.name for profile in WEIGHT_PROFILES), + help="Comma-separated subset of: neutral,scenic,snow_free,flat,balanced", + ) + + return parser.parse_args() + + +def load_profiles(profile_arg: str) -> list[WeightProfile]: + names = {name.strip() for name in profile_arg.split(",") if name.strip()} + profiles = [profile for profile in WEIGHT_PROFILES if profile.name in names] + + if not profiles: + raise SystemExit("No valid weight profiles selected.") + + return profiles + + +def node_coordinates(graph: nx.MultiDiGraph, node_id: int) -> tuple[float, float]: + node = graph.nodes[node_id] + + return float(node["x"]), float(node["y"]) + + +def percent_score(penalty: float, distance: float) -> float: + if distance <= 0: + return 0.0 + return max(0.0, min(100.0, (1.0 - penalty / distance) * 100.0)) + + +def _select_shortest_edge_attributes( + graph: nx.MultiDiGraph, u: int, v: int +) -> dict[str, Any]: + payload = graph.get_edge_data(u, v) + if not isinstance(payload, dict) or not payload: + raise nx.NetworkXNoPath + return min(payload.values(), key=lambda attrs: float(attrs.get("length", 0.0))) + + +def path_cost_vector( + graph: nx.MultiDiGraph, node_path: list[int] +) -> tuple[float, float, float, float]: + distance = snow = hills = scenic = 0.0 + + for u, v in pairwise(node_path): + edge_attrs = _select_shortest_edge_attributes(graph, u, v) + d, s, h, c = compute_edge_cost_components(edge_attrs) + distance += d + snow += s + hills += h + scenic += c + + return distance, snow, hills, scenic + + +def 
def path_has_active_objective(
    cost_vector: tuple[float, float, float, float], *, epsilon: float
) -> bool:
    """Return True if the shortest path interacts with any overlay objective.

    A pair is considered "active" when its shortest path accumulates snow or
    uphill penalty above *epsilon*, or when its scenic penalty is strictly
    below its distance (i.e. at least one edge counts as scenic).
    """
    distance, snow_penalty, uphill_penalty, scenic_penalty = cost_vector
    return (
        snow_penalty > epsilon
        or uphill_penalty > epsilon
        or scenic_penalty < distance - epsilon
    )


def weighted_path_signature(
    graph: nx.MultiDiGraph, source: int, target: int, profile: WeightProfile
) -> tuple[int, ...]:
    """Return the node sequence of the weighted shortest path for *profile*.

    The node tuple acts as a cheap signature for comparing whether different
    weight profiles change the recommended route.
    """
    weights = RoutePreferenceWeights(
        scenic_weight=profile.scenic_weight,
        snow_free_weight=profile.snow_free_weight,
        flat_weight=profile.flat_weight,
    )
    return tuple(
        nx.shortest_path(
            graph,
            source=source,
            target=target,
            weight=build_weighted_edge_cost_function(
                normalize_route_preference_weights(weights)
            ),
        )
    )


def weighted_path_changes(
    graph: nx.MultiDiGraph, source: int, target: int, profiles: list[WeightProfile]
) -> bool:
    """Return True if any non-neutral profile alters the weighted shortest path.

    The neutral baseline is WEIGHT_PROFILES[0]; each remaining profile's path
    signature is compared against it.
    """
    neutral = weighted_path_signature(graph, source, target, WEIGHT_PROFILES[0])
    return any(
        weighted_path_signature(graph, source, target, profile) != neutral
        for profile in profiles
        if profile.name != "neutral"
    )


def sample_pairs(
    graph: nx.MultiDiGraph,
    *,
    count: int,
    seed: int,
    min_distance: float,
    max_distance: float,
    max_attempts: int,
    pair_filter: str,
    active_epsilon: float,
    profiles: list[WeightProfile],
) -> list[Pair]:
    """Sample up to *count* origin-destination pairs from graph nodes.

    Pairs are drawn with a seeded RNG for reproducibility, deduplicated, and
    filtered by shortest-path distance. With pair_filter "active" the shortest
    path must touch at least one non-default overlay; with "changed" at least
    one weighted profile must additionally change the scalar shortest path.

    Raises:
        SystemExit: if fewer than *count* valid pairs are found within
            *max_attempts* sampling attempts.
    """
    rng = random.Random(seed)
    nodes = list(graph.nodes)
    pairs: list[Pair] = []
    seen: set[tuple[int, int]] = set()

    for _attempt in range(max_attempts):
        if len(pairs) >= count:
            break

        origin_node, destination_node = rng.sample(nodes, 2)
        if (origin_node, destination_node) in seen:
            continue
        seen.add((origin_node, destination_node))

        try:
            shortest_path = nx.shortest_path(
                graph,
                source=origin_node,
                target=destination_node,
                weight="length",
            )
            cost_vector = path_cost_vector(graph, shortest_path)
        # Fixed: `except nx.NetworkXNoPath, nx.NodeNotFound:` is Python 2
        # syntax and a SyntaxError under Python 3 — multiple exception types
        # must be a parenthesized tuple.
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            continue

        distance, snow_penalty, uphill_penalty, scenic_penalty = cost_vector
        if not (min_distance <= distance <= max_distance):
            continue

        # "changed" implies "active": both require an overlay-touching path.
        if pair_filter in {"active", "changed"} and not path_has_active_objective(
            cost_vector,
            epsilon=active_epsilon,
        ):
            continue

        if pair_filter == "changed" and not weighted_path_changes(
            graph,
            origin_node,
            destination_node,
            profiles,
        ):
            continue

        origin_lon, origin_lat = node_coordinates(graph, origin_node)
        destination_lon, destination_lat = node_coordinates(graph, destination_node)
        pairs.append(
            Pair(
                pair_id=len(pairs),
                origin_node=origin_node,
                destination_node=destination_node,
                origin_longitude=origin_lon,
                origin_latitude=origin_lat,
                destination_longitude=destination_lon,
                destination_latitude=destination_lat,
                shortest_distance_m=distance,
                shortest_snow_penalty=snow_penalty,
                shortest_uphill_penalty=uphill_penalty,
                shortest_scenic_penalty=scenic_penalty,
                shortest_snow_free_score=percent_score(snow_penalty, distance),
                shortest_flat_score=percent_score(uphill_penalty, distance),
                shortest_scenic_score=percent_score(scenic_penalty, distance),
            )
        )

    if len(pairs) < count:
        raise SystemExit(
            f"Only sampled {len(pairs)} valid pairs after {max_attempts} attempts. "
            "Try widening --min-distance/--max-distance, increasing "
            "--max-sampling-attempts, lowering --pairs, or using --pair-filter random."
        )

    return pairs
+ ) + + return pairs + + +def route_signature(coordinates: Iterable[Any]) -> str: + rounded = [] + for coordinate in coordinates: + lon = float(coordinate[0]) + lat = float(coordinate[1]) + rounded.append((round(lon, 6), round(lat, 6))) + payload = json.dumps(rounded, separators=(",", ":")) + return hashlib.sha1(payload.encode("utf-8")).hexdigest()[:12] + + +def route_options( + *, + method: str, + profile: WeightProfile, + pareto_max_routes: int, + pareto_max_labels_per_node: int, + pareto_max_total_labels: int, +) -> RoutePlanningOptions: + return RoutePlanningOptions( + route_optimization_method=method, # type: ignore[arg-type] + preference_weights=RoutePreferenceWeights( + scenic_weight=profile.scenic_weight, + snow_free_weight=profile.snow_free_weight, + flat_weight=profile.flat_weight, + ), + pareto_max_routes=pareto_max_routes, + pareto_max_labels_per_node=pareto_max_labels_per_node, + pareto_max_total_labels=pareto_max_total_labels, + ) + + +def run_one_request( + *, + pair: Pair, + travel_mode: str, + method: str, + profile: WeightProfile, + pareto_max_routes: int, + pareto_max_labels_per_node: int, + pareto_max_total_labels: int, +) -> tuple[dict[str, Any], list[dict[str, Any]]]: + coordinates = RouteCoordinates( + origin_longitude=pair.origin_longitude, + origin_latitude=pair.origin_latitude, + destination_longitude=pair.destination_longitude, + destination_latitude=pair.destination_latitude, + ) + options = route_options( + method=method, + profile=profile, + pareto_max_routes=pareto_max_routes, + pareto_max_labels_per_node=pareto_max_labels_per_node, + pareto_max_total_labels=pareto_max_total_labels, + ) + + pareto_stats: dict[str, Any] = { + "total_labels": "", + "destination_labels": "", + "hit_total_label_cap": "", + } + + original_pareto_search = route_planner_module.run_pareto_label_search + + def instrumented_pareto_search(*args: Any, **kwargs: Any): + labels, destination_label_ids = original_pareto_search(*args, **kwargs) + 
pareto_stats["total_labels"] = len(labels) + pareto_stats["destination_labels"] = len(destination_label_ids) + pareto_stats["hit_total_label_cap"] = len(labels) >= pareto_max_total_labels + return labels, destination_label_ids + + start = time.perf_counter() + try: + if method == "pareto": + with patch( + "app.route_planner.run_pareto_label_search", + side_effect=instrumented_pareto_search, + ): + response = route_planner_module.build_route_feature_collection( + graph_state=GRAPH_STATE, + route_coordinates=coordinates, + travel_mode=travel_mode, # type: ignore[arg-type] + route_options=options, + ) + else: + response = route_planner_module.build_route_feature_collection( + graph_state=GRAPH_STATE, + route_coordinates=coordinates, + travel_mode=travel_mode, # type: ignore[arg-type] + route_options=options, + ) + success = True + error = "" + except Exception as exception: # noqa: BLE001 - evaluation should log failures. + response = None + success = False + error = repr(exception) + + runtime_ms = (time.perf_counter() - start) * 1000.0 + + run_row = { + "pair_id": pair.pair_id, + "travel_mode": travel_mode, + "method": method, + "profile": profile.name, + "scenic_weight": profile.scenic_weight, + "snow_free_weight": profile.snow_free_weight, + "flat_weight": profile.flat_weight, + "runtime_ms": runtime_ms, + "success": success, + "error": error, + "route_count": len(response.features) if response is not None else 0, + "origin_node": pair.origin_node, + "destination_node": pair.destination_node, + "reference_shortest_distance_m": pair.shortest_distance_m, + **pareto_stats, + } + + route_rows: list[dict[str, Any]] = [] + if response is not None: + for feature in response.features: + breakdown = feature.properties.penalty_breakdown + if breakdown is None: + continue + distance = float(breakdown.distance) + snow_penalty = float(breakdown.snow_penalty) + uphill_penalty = float(breakdown.uphill_penalty) + scenic_penalty = float(breakdown.scenic_penalty) + 
route_rows.append( + { + "pair_id": pair.pair_id, + "travel_mode": travel_mode, + "method": method, + "profile": profile.name, + "route_index": feature.properties.route_index, + "route_count": response.meta.route_count, + "pareto_rank": feature.properties.pareto_rank or "", + "selection_score": feature.properties.selection_score or "", + "distance_m": distance, + "snow_penalty": snow_penalty, + "uphill_penalty": uphill_penalty, + "scenic_penalty": scenic_penalty, + "snow_free_score": percent_score(snow_penalty, distance), + "flat_score": percent_score(uphill_penalty, distance), + "scenic_score": percent_score(scenic_penalty, distance), + "signature": route_signature(feature.geometry.coordinates), + } + ) + + return run_row, route_rows + + +def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: + if not rows: + raise SystemExit(f"No rows to write for {path}") + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("w", newline="", encoding="utf-8") as file: + writer = csv.DictWriter(file, fieldnames=list(rows[0].keys())) + writer.writeheader() + writer.writerows(rows) + + +def pair_rows( + pairs: list[Pair], *, pair_filter: str, min_distance: float, max_distance: float +) -> list[dict[str, Any]]: + return [ + { + "pair_id": pair.pair_id, + "pair_filter": pair_filter, + "min_distance_m": min_distance, + "max_distance_m": max_distance, + "origin_node": pair.origin_node, + "destination_node": pair.destination_node, + "origin_longitude": pair.origin_longitude, + "origin_latitude": pair.origin_latitude, + "destination_longitude": pair.destination_longitude, + "destination_latitude": pair.destination_latitude, + "shortest_distance_m": pair.shortest_distance_m, + "shortest_snow_penalty": pair.shortest_snow_penalty, + "shortest_uphill_penalty": pair.shortest_uphill_penalty, + "shortest_scenic_penalty": pair.shortest_scenic_penalty, + "shortest_snow_free_score": pair.shortest_snow_free_score, + "shortest_flat_score": pair.shortest_flat_score, + 
"shortest_scenic_score": pair.shortest_scenic_score, + } + for pair in pairs + ] + + +def main() -> None: + args = parse_args() + profiles = load_profiles(args.profiles) + + print("Loading graphs and overlays ...", flush=True) + load_graph_state( + place_name=PLACE_NAME, + overlay_directory=OVERLAY_DIRECTORY, + graph_state=GRAPH_STATE, + ) + graph = get_graph_for_travel_mode(GRAPH_STATE, args.travel_mode) + + print("Sampling origin-destination pairs ...", flush=True) + pairs = sample_pairs( + graph, + count=args.pairs, + seed=args.seed, + min_distance=args.min_distance, + max_distance=args.max_distance, + max_attempts=args.max_sampling_attempts, + pair_filter=args.pair_filter, + active_epsilon=args.active_epsilon, + profiles=profiles, + ) + print( + f"Sampled {len(pairs)} pairs; shortest-distance median " + f"{statistics.median(pair.shortest_distance_m for pair in pairs):.0f} m.", + flush=True, + ) + + run_rows: list[dict[str, Any]] = [] + route_rows: list[dict[str, Any]] = [] + + requests: list[tuple[Pair, str, WeightProfile]] = [] + for pair in pairs: + requests.append((pair, "shortest", WEIGHT_PROFILES[0])) + for profile in profiles: + requests.append((pair, "weighted", profile)) + requests.append((pair, "pareto", profile)) + + print(f"Running {len(requests)} route requests ...", flush=True) + for index, (pair, method, profile) in enumerate(requests, start=1): + print( + f"[{index}/{len(requests)}] pair={pair.pair_id} method={method} profile={profile.name}", + flush=True, + ) + run_row, rows_for_request = run_one_request( + pair=pair, + travel_mode=args.travel_mode, + method=method, + profile=profile, + pareto_max_routes=args.pareto_max_routes, + pareto_max_labels_per_node=args.pareto_max_labels_per_node, + pareto_max_total_labels=args.pareto_max_total_labels, + ) + run_rows.append(run_row) + route_rows.extend(rows_for_request) + + args.out.mkdir(parents=True, exist_ok=True) + write_csv( + args.out / "pairs.csv", + pair_rows( + pairs, + 
pair_filter=args.pair_filter, + min_distance=args.min_distance, + max_distance=args.max_distance, + ), + ) + write_csv(args.out / "runs.csv", run_rows) + write_csv(args.out / "routes.csv", route_rows) + + failure_count = sum(1 for row in run_rows if not row["success"]) + print(f"Done. Wrote CSV files to {args.out}.") + if failure_count: + print(f"Warning: {failure_count} requests failed. Inspect runs.csv.") + + +if __name__ == "__main__": + main() From 53ca95ce86a3409fdbbe8d77e1eac312794bb86d Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Mon, 4 May 2026 17:26:07 +0200 Subject: [PATCH 2/7] Improve error handling in route_planner.py --- backend/app/route_planner.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/backend/app/route_planner.py b/backend/app/route_planner.py index ee13c2b..97dceb4 100644 --- a/backend/app/route_planner.py +++ b/backend/app/route_planner.py @@ -12,7 +12,6 @@ from geojson_pydantic import LineString as PydanticLineString from geojson_pydantic import Point as PydanticPoint from geojson_pydantic.types import Position2D, Position3D -from networkx.exception import NetworkXNoPath from app.costs import ( FALLBACK_EDGE_COST, @@ -54,6 +53,10 @@ type NearestNodeFunction = Callable[..., int] +class ParetoSearchLimitExceededError(RuntimeError): + """Raised when bounded Pareto search exhausts its label budget.""" + + @dataclass(frozen=True, slots=True) class RouteCandidate: """A fully resolved route candidate ready for response serialization.""" @@ -580,7 +583,12 @@ def _build_pareto_route_candidates( ) if not destination_label_ids: - raise NetworkXNoPath + error_message = ( + "Bounded Pareto search did not find a destination label before reaching " + "the configured label limit." 
+ ) + + raise ParetoSearchLimitExceededError(error_message) ranked_label_ids = sorted( destination_label_ids, @@ -826,8 +834,10 @@ def build_route_feature_collection( destination_node_id, route_options, ) - except NetworkXNoPath: - raise HTTPException(status_code=500, detail="No path found.") from None + except ParetoSearchLimitExceededError as exception: + raise HTTPException( + status_code=500, detail=f"Path calculation failed: {exception}" + ) from None except Exception as exception: # pragma: no cover - library failure path raise HTTPException( status_code=500, From bba4cdba567322be8aac2eda6d7a74b7b83f4395 Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Mon, 4 May 2026 17:27:18 +0200 Subject: [PATCH 3/7] Reformat Jupyter Notebook --- backend/notebooks/test.ipynb | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/backend/notebooks/test.ipynb b/backend/notebooks/test.ipynb index 7a3577d..4f5bbf8 100644 --- a/backend/notebooks/test.ipynb +++ b/backend/notebooks/test.ipynb @@ -54,7 +54,9 @@ "cell_type": "code", "id": "786fa2b4142c599f", "metadata": {}, - "source": "fig, ax = ox.plot.plot_graph(GRAPH_CYCLING)", + "source": [ + "fig, ax = ox.plot.plot_graph(GRAPH_CYCLING)" + ], "outputs": [], "execution_count": null }, @@ -62,7 +64,9 @@ "cell_type": "code", "id": "ebb5dc7e6caf7817", "metadata": {}, - "source": "list(GRAPH_CYCLING.edges(keys=True, data=True))[34]", + "source": [ + "list(GRAPH_CYCLING.edges(keys=True, data=True))[34]" + ], "outputs": [], "execution_count": null }, From 41125673638d1fe9e0a2b86c98d024586e6835cc Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Mon, 4 May 2026 17:27:35 +0200 Subject: [PATCH 4/7] Rearrange Makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 2fcfc8f..0d19f54 100644 --- a/Makefile +++ b/Makefile @@ -65,11 +65,11 @@ export-schema: generate-client: $(OPENAPI_JSON) cd $(FRONTEND_DIR) && pnpm generate-client -openapi: export-schema 
generate-client - $(OPENAPI_JSON): $(BACKEND_DIR)/app/main.py $(BACKEND_DIR)/export_openapi.py cd $(BACKEND_DIR) && uv run python export_openapi.py +openapi: export-schema generate-client + install-backend: cd $(BACKEND_DIR) && uv sync From 6d446e0d2d06d60acc5ba1b347db4262ac58d9ee Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Mon, 4 May 2026 17:27:56 +0200 Subject: [PATCH 5/7] Update all packages --- backend/uv.lock | 30 ++++----- frontend/package.json | 12 ++-- frontend/pnpm-lock.yaml | 136 ++++++++++++++++++++-------------------- 3 files changed, 89 insertions(+), 89 deletions(-) diff --git a/backend/uv.lock b/backend/uv.lock index e04df46..ac986f7 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -986,7 +986,7 @@ wheels = [ [[package]] name = "jupyter-server" -version = "2.17.0" +version = "2.18.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1008,9 +1008,9 @@ dependencies = [ { name = "traitlets" }, { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/ec/9302cec1ccacdd33c1b1312ac31681c8975cae56c626d783ab49edf9c681/jupyter_server-2.18.0.tar.gz", hash = "sha256:568b27bce4320a53c3eebf1bdcbee9acf48a8ab7f66ec83d900ca9909d4fb770", size = 751152, upload-time = "2026-05-04T13:39:29.685Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = "2025-08-21T14:42:52.034Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/f9/050312d92072ddb9ce14c11171804c07435790c98d4350935a780d9e10c2/jupyter_server-2.18.0-py3-none-any.whl", hash = "sha256:69a5397a039d689da81a45955f9b23e95ee167f6d8a8d64372fb616f2aac650a", size = 391687, upload-time = "2026-05-04T13:39:27.549Z" }, ] [[package]] @@ -1222,11 +1222,11 @@ wheels = [ [[package]] name = "mistune" -version = "3.2.0" +version = "3.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/55/d01f0c4b45ade6536c51170b9043db8b2ec6ddf4a35c7ea3f5f559ac935b/mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a", size = 95467, upload-time = "2025-12-23T11:36:34.994Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/84/620cc3f7e3adf6f5067e10f4dbae71295d8f9e16d5d3f9ef97c40f2f592c/mistune-3.2.1.tar.gz", hash = "sha256:7c8e5501d38bac1582e067e46c8343f17d57ea1aaa735823f3aba1fd59c88a28", size = 98003, upload-time = "2026-05-03T14:33:22.312Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" }, + { url = "https://files.pythonhosted.org/packages/2a/7f/a946aa4f8752b37102b41e64dca18a1976ac705c3a0d1dfe74d820a02552/mistune-3.2.1-py3-none-any.whl", hash = "sha256:78cdb0ba5e938053ccf63651b352508d2efa9411dc8810bfb05f2dc5140c0048", size = 53749, upload-time = "2026-05-03T14:33:20.551Z" }, ] [[package]] @@ -2181,15 +2181,15 @@ wheels = [ [[package]] name = "sentry-sdk" -version = "2.58.0" +version = "2.59.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/26/b3/fb8291170d0e844173164709fc0fa0c221ed75a5da740c8746f2a83b4eb1/sentry_sdk-2.58.0.tar.gz", hash = "sha256:c1144d947352d54e5b7daa63596d9f848adf684989c06c4f5a659f0c85a18f6f", size = 438764, upload-time = "2026-04-13T17:23:26.265Z" } +sdist = { url = "https://files.pythonhosted.org/packages/65/e0/9bf5e5fc7442b10880f3ec0eff0ef4208b84a099606f343ec4f5445227fb/sentry_sdk-2.59.0.tar.gz", hash = "sha256:cd265808ef8bf3f3edf69b527c0a0b2b6b1322762679e55b8987db2e9584aec1", size = 447331, upload-time = "2026-05-04T12:19:06.538Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/eb/d875669993b762556ae8b2efd86219943b4c0864d22204d622a9aee3052b/sentry_sdk-2.58.0-py2.py3-none-any.whl", hash = "sha256:688d1c704ddecf382ea3326f21a67453d4caa95592d722b7c780a36a9d23109e", size = 460919, upload-time = "2026-04-13T17:23:24.675Z" }, + { url = "https://files.pythonhosted.org/packages/bf/00/b8cc413748fb6383d1582e7cda51314f99743351c462a92dc690d5b5853b/sentry_sdk-2.59.0-py2.py3-none-any.whl", hash = "sha256:abcf65ee9a9d9cdebf9ad369782408ecca9c1c792686ef06ba34f5ab233527fe", size = 468432, upload-time = "2026-05-04T12:19:04.741Z" }, ] [[package]] @@ -2386,14 +2386,14 @@ wheels = [ [[package]] name = "types-requests" -version = "2.33.0.20260408" +version = "2.33.0.20260503" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/69/6a/749dc53a54a3f35842c1f8197b3ca6b54af6d7458a1bfc75f6629b6da666/types_requests-2.33.0.20260408.tar.gz", hash = "sha256:95b9a86376807a216b2fb412b47617b202091c3ea7c078f47cc358d5528ccb7b", size = 23882, upload-time = "2026-04-08T04:34:49.33Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/b8/57e94268c0d82ac3eaa2fc35aa8ca7bbc2542f726b67dcf90b0b00a3b14d/types_requests-2.33.0.20260503.tar.gz", hash = "sha256:9721b2d9dbee7131f2fb39f20f0ebb1999c18cef4b512c9a7932f3722de7c5f4", size = 23931, upload-time = 
"2026-05-03T05:20:08.882Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/90/b8/78fd6c037de4788c040fdd323b3369804400351b7827473920f6c1d03c10/types_requests-2.33.0.20260408-py3-none-any.whl", hash = "sha256:81f31d5ea4acb39f03be7bc8bed569ba6d5a9c5d97e89f45ac43d819b68ca50f", size = 20739, upload-time = "2026-04-08T04:34:48.325Z" }, + { url = "https://files.pythonhosted.org/packages/c3/82/959113a6351f3ca046cd0a8cd2cee071d7ea47473560557a01eeae9a6fe2/types_requests-2.33.0.20260503-py3-none-any.whl", hash = "sha256:02aaa7e3577a13471715bb1bddb693cc985ea514f754b503bf033e6a09a3e528", size = 20736, upload-time = "2026-05-03T05:20:07.858Z" }, ] [[package]] @@ -2536,11 +2536,11 @@ wheels = [ [[package]] name = "wcwidth" -version = "0.6.0" +version = "0.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/35/a2/8e3becb46433538a38726c948d3399905a4c7cabd0df578ede5dc51f0ec2/wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159", size = 159684, upload-time = "2026-02-06T19:19:40.919Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/ee/afaf0f85a9a18fe47a67f1e4422ed6cf1fe642f0ae0a2f81166231303c52/wcwidth-0.7.0.tar.gz", hash = "sha256:90e3a7ea092341c44b99562e75d09e4d5160fe7a3974c6fb842a101a95e7eed0", size = 182132, upload-time = "2026-05-02T16:04:12.653Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" }, + { url = "https://files.pythonhosted.org/packages/41/52/e465037f5375f43533d1a80b6923955201596a99142ed524d77b571a1418/wcwidth-0.7.0-py3-none-any.whl", hash = "sha256:5d69154c429a82910e241c738cd0e2976fac8a2dd47a1a805f4afed1c0f136f2", size = 110825, upload-time = "2026-05-02T16:04:11.033Z" }, ] 
[[package]] diff --git a/frontend/package.json b/frontend/package.json index 5474ab7..10d5c29 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -18,12 +18,12 @@ "@mantine/form": "^9.1.1", "@mantine/hooks": "^9.1.1", "@tabler/icons-react": "^3.41.1", - "@tanstack/query-async-storage-persister": "^5.100.8", - "@tanstack/react-query": "^5.100.8", - "@tanstack/react-query-persist-client": "^5.100.8", + "@tanstack/query-async-storage-persister": "^5.100.9", + "@tanstack/react-query": "^5.100.9", + "@tanstack/react-query-persist-client": "^5.100.9", "@turf/boolean-point-in-polygon": "^7.3.5", "@turf/helpers": "^7.3.5", - "axios": "^1.15.2", + "axios": "^1.16.0", "leaflet": "^1.9.4", "react": "^19.2.5", "react-dom": "^19.2.5", @@ -35,8 +35,8 @@ "@eslint/js": "^9.39.4", "@hey-api/openapi-ts": "0.97.0", "@rolldown/plugin-babel": "^0.2.3", - "@tanstack/eslint-plugin-query": "^5.100.8", - "@tanstack/react-query-devtools": "^5.100.8", + "@tanstack/eslint-plugin-query": "^5.100.9", + "@tanstack/react-query-devtools": "^5.100.9", "@types/babel__core": "^7.20.5", "@types/geojson": "^7946.0.16", "@types/leaflet": "^1.9.21", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index a66aa79..633ab92 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -21,14 +21,14 @@ importers: specifier: ^3.41.1 version: 3.41.1(react@19.2.5) '@tanstack/query-async-storage-persister': - specifier: ^5.100.8 - version: 5.100.8 + specifier: ^5.100.9 + version: 5.100.9 '@tanstack/react-query': - specifier: ^5.100.8 - version: 5.100.8(react@19.2.5) + specifier: ^5.100.9 + version: 5.100.9(react@19.2.5) '@tanstack/react-query-persist-client': - specifier: ^5.100.8 - version: 5.100.8(@tanstack/react-query@5.100.8(react@19.2.5))(react@19.2.5) + specifier: ^5.100.9 + version: 5.100.9(@tanstack/react-query@5.100.9(react@19.2.5))(react@19.2.5) '@turf/boolean-point-in-polygon': specifier: ^7.3.5 version: 7.3.5 @@ -36,8 +36,8 @@ importers: specifier: ^7.3.5 version: 
7.3.5 axios: - specifier: ^1.15.2 - version: 1.15.2 + specifier: ^1.16.0 + version: 1.16.0 leaflet: specifier: ^1.9.4 version: 1.9.4 @@ -67,11 +67,11 @@ importers: specifier: ^0.2.3 version: 0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)) '@tanstack/eslint-plugin-query': - specifier: ^5.100.8 - version: 5.100.8(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + specifier: ^5.100.9 + version: 5.100.9(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) '@tanstack/react-query-devtools': - specifier: ^5.100.8 - version: 5.100.8(@tanstack/react-query@5.100.8(react@19.2.5))(react@19.2.5) + specifier: ^5.100.9 + version: 5.100.9(@tanstack/react-query@5.100.9(react@19.2.5))(react@19.2.5) '@types/babel__core': specifier: ^7.20.5 version: 7.20.5 @@ -686,8 +686,8 @@ packages: '@tabler/icons@3.41.1': resolution: {integrity: sha512-OaRnVbRmH2nHtFeg+RmMJ/7m2oBIF9XCJAUD5gQnMrpK9f05ydj8MZrAf3NZQqOXyxGN1UBL0D5IKLLEUfr74Q==} - '@tanstack/eslint-plugin-query@5.100.8': - resolution: {integrity: sha512-Hn2zEnjAPPBdHvFgSmcG8rUuskl2IS49LDjziF4CW8i8EmNT4/CkKudOckawOqzvDm/hjqZy0VFjyrQI1ZSw7Q==} + '@tanstack/eslint-plugin-query@5.100.9': + resolution: {integrity: sha512-3jZwyxAZWSBqI7EXEdw+rktFfX1opMpqn9Lruwz52DEzQdi7kbKnqixjhR3dJ1xFfG05YxV9vsqXGxXqcLAmjA==} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: ^5.4.0 || ^6.0.0 @@ -695,32 +695,32 @@ packages: typescript: optional: true - '@tanstack/query-async-storage-persister@5.100.8': - resolution: {integrity: sha512-LrCF9SGyWgtgSq+nuyrIg5fXXt5OV24Bg70HB9mehdqcWfzeILu0NBlDzMl6bG96BO+eQ1ms/pVdyqVthdGl6g==} + '@tanstack/query-async-storage-persister@5.100.9': + resolution: {integrity: sha512-KTWqpXIAwuR1bSIxfOnoRr7hOMxfrtLk9kxSuDfOu4sSBEFqHp9+aDp8Ig6YdHxCclNI86SizPnmKmSqNxcJxg==} - '@tanstack/query-core@5.100.8': - resolution: {integrity: 
sha512-ceYwSFOqjPwET5TA6IOYxzxlGc0ekyH/gfOtWkP0PX43rzX9bxW48Iuw8KAduKCToi4rJAQ6nRy2kAe8gszdmg==} + '@tanstack/query-core@5.100.9': + resolution: {integrity: sha512-SJSFw1S8+kQ0+knv/XGfrbocWoAlT7vDKsSImtLx3ZPQmEcR46hkDjLSvynSy25N8Ms4tIEini1FuBd5k7IscQ==} - '@tanstack/query-devtools@5.100.8': - resolution: {integrity: sha512-29D6k564h7eudwNdfRcq6Je2VFUWGxHwADPg1xC2yHxrovYBwZiqzIv/DkPRsK/EMoOIPIvPq+IU0uCxiQXYPA==} + '@tanstack/query-devtools@5.100.9': + resolution: {integrity: sha512-gqiptrTIhbK2PuCaPRHmWXfJG1NGYVFpAr0HqogEqiSBNB5xDz6fmesQt7w4WgMOqOQPnPHJ3ZDMuhDaXvNO8g==} - '@tanstack/query-persist-client-core@5.100.8': - resolution: {integrity: sha512-FxhXz6Zy8VgZFeIEQom8FKvAHcW9cQe8yx2LZ2o304CE8fVzo4RRfVeI/t4qBonLy8g7+OELb8wIzrn99rxMTQ==} + '@tanstack/query-persist-client-core@5.100.9': + resolution: {integrity: sha512-sCPZZp3D9sOeqcA4SDxjUIm4wVq8PwHebH4ouFZetwjT4xvGjT/cLBQ4Sst+BFcFuk745pCPkf3T4MFliLHECQ==} - '@tanstack/react-query-devtools@5.100.8': - resolution: {integrity: sha512-BKpysWo1u3kVMtv92XOv/Gu6eCbE/IxBLJPs0GG/qyySUQvZI2h7mqRwyf8Aa6WfUoX8Yf2AAh0uugQLAr8KtQ==} + '@tanstack/react-query-devtools@5.100.9': + resolution: {integrity: sha512-mM3slaVGXJmz+pOLgXdANj75ikgQCyudyl3kmFvm6brI1JyVeY/+IeD17uDHIvZrD8hfoO2sdZ54RFsHdYAuhA==} peerDependencies: - '@tanstack/react-query': ^5.100.8 + '@tanstack/react-query': ^5.100.9 react: ^18 || ^19 - '@tanstack/react-query-persist-client@5.100.8': - resolution: {integrity: sha512-zbmX8Fj8NsBYFA7f/qNfWHw7tjNNS4QYBaWz9FYUM8pgbeJ845KJXIc2QRE3eWpgdH7IHhDN4dJMUxlBj68vYg==} + '@tanstack/react-query-persist-client@5.100.9': + resolution: {integrity: sha512-qO7j+3VUZm4YH4T3dWszDqgvO+5+6NB1kSY+QmF7JD6+IONfeNmGyOzyobjmrX+6CYLPQtQ+sjM9vFYaSOAv6A==} peerDependencies: - '@tanstack/react-query': ^5.100.8 + '@tanstack/react-query': ^5.100.9 react: ^18 || ^19 - '@tanstack/react-query@5.100.8': - resolution: {integrity: sha512-iNNEekixXU5vtAGKKZX2lx3jTooG5yNY+kv0wSgEdEYG0Mj0JM5bcuQtC35ZAP3nDopT6jciUK3xeX65U7AnfA==} + 
'@tanstack/react-query@5.100.9': + resolution: {integrity: sha512-Oa44XkaI3kCNN6ME0KByU3xT3SEUNOMfZpHxL6+wFoTm+OeUFYHKdeYVe0aOXlRDm/f15sgLwEt2HDorIdW8+A==} peerDependencies: react: ^18 || ^19 @@ -733,8 +733,8 @@ packages: '@turf/invariant@7.3.5': resolution: {integrity: sha512-ZVIvsBvjr8lO7WxC5zYNjRsjSDvyGvWkJMjuWaJjTU8x+1tmfNnw3gDX/TI2Sit83gcRYLYkNo23lB/udqx/Hg==} - '@tybys/wasm-util@0.10.1': - resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} + '@tybys/wasm-util@0.10.2': + resolution: {integrity: sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==} '@types/babel__core@7.20.5': resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} @@ -987,8 +987,8 @@ packages: resolution: {integrity: sha512-KunSNx+TVpkAw/6ULfhnx+HWRecjqZGTOyquAoWHYLRSdK1tB5Ihce1ZW+UY3fj33bYAFWPu7W/GRSmmrCGuxA==} engines: {node: '>=4'} - axios@1.15.2: - resolution: {integrity: sha512-wLrXxPtcrPTsNlJmKjkPnNPK2Ihe0hn0wGSaTEiHRPxwjvJwT3hKmXF4dpqxmPO9SoNb2FsYXj/xEo0gHN+D5A==} + axios@1.16.0: + resolution: {integrity: sha512-6hp5CwvTPlN2A31g5dxnwAX0orzM7pmCRDLnZSX772mv8WDqICwFjowHuPs04Mc8deIld1+ejhtaMn5vp6b+1w==} axobject-query@4.1.0: resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} @@ -1004,8 +1004,8 @@ packages: resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} engines: {node: 18 || 20 || >=22} - baseline-browser-mapping@2.10.25: - resolution: {integrity: sha512-QO/VHsXCQdnzADMfmkeOPvHdIAkoB7i0/rGjINPJEetLx75hNttVWGQ/jycHUDP9zZ9rupbm60WRxcwViB0MiA==} + baseline-browser-mapping@2.10.27: + resolution: {integrity: sha512-zEs/ufmZoUd7WftKpKyXaT6RFxpQ5Qm9xytKRHvJfxFV9DFJkZph9RvJ1LcOUi0Z1ZVijMte65JbILeV+8QQEA==} engines: {node: '>=6.0.0'} hasBin: true @@ -1387,8 +1387,8 @@ packages: 
fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} - fast-uri@3.1.0: - resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} + fast-uri@3.1.1: + resolution: {integrity: sha512-h2r7rcm6Ee/J8o0LD5djLuFVcfbZxhvho4vvsbeV0aMvXjUgqv4YpxpkEx0d68l6+IleVfLAdVEfhR7QNMkGHQ==} fdir@6.5.0: resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} @@ -2459,8 +2459,8 @@ packages: peerDependencies: zod: ^3.25.0 || ^4.0.0 - zod@4.4.2: - resolution: {integrity: sha512-IynmDyxsEsb9RKzO3J9+4SxXnl2FTFSzNBaKKaMV6tsSk0rw9gYw9gs+JFCq/qk2LCZ78KDwyj+Z289TijSkUw==} + zod@4.4.3: + resolution: {integrity: sha512-ytENFjIJFl2UwYglde2jchW2Hwm4GJFLDiSXWdTrJQBIN9Fcyp7n4DhxJEiWNAJMV1/BqWfW/kkg71UDcHJyTQ==} snapshots: @@ -2858,7 +2858,7 @@ snapshots: dependencies: '@emnapi/core': 1.10.0 '@emnapi/runtime': 1.10.0 - '@tybys/wasm-util': 0.10.1 + '@tybys/wasm-util': 0.10.2 optional: true '@oxc-project/types@0.127.0': {} @@ -2954,7 +2954,7 @@ snapshots: '@tabler/icons@3.41.1': {} - '@tanstack/eslint-plugin-query@5.100.8(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@tanstack/eslint-plugin-query@5.100.9(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@typescript-eslint/utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.4(jiti@2.6.1) @@ -2963,34 +2963,34 @@ snapshots: transitivePeerDependencies: - supports-color - '@tanstack/query-async-storage-persister@5.100.8': + '@tanstack/query-async-storage-persister@5.100.9': dependencies: - '@tanstack/query-core': 5.100.8 - '@tanstack/query-persist-client-core': 5.100.8 + '@tanstack/query-core': 5.100.9 + '@tanstack/query-persist-client-core': 5.100.9 - '@tanstack/query-core@5.100.8': {} + '@tanstack/query-core@5.100.9': {} - '@tanstack/query-devtools@5.100.8': {} + '@tanstack/query-devtools@5.100.9': {} - 
'@tanstack/query-persist-client-core@5.100.8': + '@tanstack/query-persist-client-core@5.100.9': dependencies: - '@tanstack/query-core': 5.100.8 + '@tanstack/query-core': 5.100.9 - '@tanstack/react-query-devtools@5.100.8(@tanstack/react-query@5.100.8(react@19.2.5))(react@19.2.5)': + '@tanstack/react-query-devtools@5.100.9(@tanstack/react-query@5.100.9(react@19.2.5))(react@19.2.5)': dependencies: - '@tanstack/query-devtools': 5.100.8 - '@tanstack/react-query': 5.100.8(react@19.2.5) + '@tanstack/query-devtools': 5.100.9 + '@tanstack/react-query': 5.100.9(react@19.2.5) react: 19.2.5 - '@tanstack/react-query-persist-client@5.100.8(@tanstack/react-query@5.100.8(react@19.2.5))(react@19.2.5)': + '@tanstack/react-query-persist-client@5.100.9(@tanstack/react-query@5.100.9(react@19.2.5))(react@19.2.5)': dependencies: - '@tanstack/query-persist-client-core': 5.100.8 - '@tanstack/react-query': 5.100.8(react@19.2.5) + '@tanstack/query-persist-client-core': 5.100.9 + '@tanstack/react-query': 5.100.9(react@19.2.5) react: 19.2.5 - '@tanstack/react-query@5.100.8(react@19.2.5)': + '@tanstack/react-query@5.100.9(react@19.2.5)': dependencies: - '@tanstack/query-core': 5.100.8 + '@tanstack/query-core': 5.100.9 react: 19.2.5 '@turf/boolean-point-in-polygon@7.3.5': @@ -3012,7 +3012,7 @@ snapshots: '@types/geojson': 7946.0.16 tslib: 2.8.1 - '@tybys/wasm-util@0.10.1': + '@tybys/wasm-util@0.10.2': dependencies: tslib: 2.8.1 optional: true @@ -3252,7 +3252,7 @@ snapshots: ajv@8.18.0: dependencies: fast-deep-equal: 3.1.3 - fast-uri: 3.1.0 + fast-uri: 3.1.1 json-schema-traverse: 1.0.0 require-from-string: 2.0.2 @@ -3335,7 +3335,7 @@ snapshots: axe-core@4.11.4: {} - axios@1.15.2: + axios@1.16.0: dependencies: follow-redirects: 1.16.0 form-data: 4.0.5 @@ -3353,7 +3353,7 @@ snapshots: balanced-match@4.0.4: {} - baseline-browser-mapping@2.10.25: {} + baseline-browser-mapping@2.10.27: {} brace-expansion@1.1.14: dependencies: @@ -3366,7 +3366,7 @@ snapshots: browserslist@4.28.2: dependencies: - 
baseline-browser-mapping: 2.10.25 + baseline-browser-mapping: 2.10.27 caniuse-lite: 1.0.30001791 electron-to-chromium: 1.5.349 node-releases: 2.0.38 @@ -3742,8 +3742,8 @@ snapshots: '@babel/parser': 7.29.3 eslint: 9.39.4(jiti@2.6.1) hermes-parser: 0.25.1 - zod: 4.4.2 - zod-validation-error: 4.0.2(zod@4.4.2) + zod: 4.4.3 + zod-validation-error: 4.0.2(zod@4.4.3) transitivePeerDependencies: - supports-color @@ -3863,7 +3863,7 @@ snapshots: fast-levenshtein@2.0.6: {} - fast-uri@3.1.0: {} + fast-uri@3.1.1: {} fdir@6.5.0(picomatch@4.0.4): optionalDependencies: @@ -4955,8 +4955,8 @@ snapshots: yocto-queue@0.1.0: {} - zod-validation-error@4.0.2(zod@4.4.2): + zod-validation-error@4.0.2(zod@4.4.3): dependencies: - zod: 4.4.2 + zod: 4.4.3 - zod@4.4.2: {} + zod@4.4.3: {} From 7d9ad8c6d44970ffb3cc2fb212811cf07e9a318b Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Tue, 5 May 2026 12:57:54 +0200 Subject: [PATCH 6/7] Install matplotlib, update all packages --- backend/pyproject.toml | 1 + backend/uv.lock | 8 +- frontend/package.json | 8 +- frontend/pnpm-lock.yaml | 273 ++++++++++++++++++++-------------------- 4 files changed, 147 insertions(+), 143 deletions(-) diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 36bc8c1..07627bb 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -19,6 +19,7 @@ dependencies = [ dev = [ "basedpyright>=1.38.2", "jupyterlab>=4.5.4", + "matplotlib>=3.10.9", "pytest>=9.0.2", "pytest-cov>=7.0.0", "ruff>=0.15.0", diff --git a/backend/uv.lock b/backend/uv.lock index ac986f7..17a4f32 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -402,6 +402,7 @@ dependencies = [ dev = [ { name = "basedpyright" }, { name = "jupyterlab" }, + { name = "matplotlib" }, { name = "pytest" }, { name = "pytest-cov" }, { name = "ruff" }, @@ -424,6 +425,7 @@ requires-dist = [ dev = [ { name = "basedpyright", specifier = ">=1.38.2" }, { name = "jupyterlab", specifier = ">=4.5.4" }, + { name = "matplotlib", specifier = ">=3.10.9" }, { name = 
"pytest", specifier = ">=9.0.2" }, { name = "pytest-cov", specifier = ">=7.0.0" }, { name = "ruff", specifier = ">=0.15.0" }, @@ -986,7 +988,7 @@ wheels = [ [[package]] name = "jupyter-server" -version = "2.18.0" +version = "2.18.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1008,9 +1010,9 @@ dependencies = [ { name = "traitlets" }, { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f1/ec/9302cec1ccacdd33c1b1312ac31681c8975cae56c626d783ab49edf9c681/jupyter_server-2.18.0.tar.gz", hash = "sha256:568b27bce4320a53c3eebf1bdcbee9acf48a8ab7f66ec83d900ca9909d4fb770", size = 751152, upload-time = "2026-05-04T13:39:29.685Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/b0/666586d557a71a58cd9960b154fb9aee0ed81dd62a50371195ab95731909/jupyter_server-2.18.1.tar.gz", hash = "sha256:f62be526369b791625e03bd658070563c1a4e9a0a2f439ea1f9dbacea5f7191a", size = 752024, upload-time = "2026-05-05T09:17:51.101Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/f9/050312d92072ddb9ce14c11171804c07435790c98d4350935a780d9e10c2/jupyter_server-2.18.0-py3-none-any.whl", hash = "sha256:69a5397a039d689da81a45955f9b23e95ee167f6d8a8d64372fb616f2aac650a", size = 391687, upload-time = "2026-05-04T13:39:27.549Z" }, + { url = "https://files.pythonhosted.org/packages/a4/45/bfe3779fd06714a379128f2c4eaf7c99414f0eb081f9f34c135f6b3d511c/jupyter_server-2.18.1-py3-none-any.whl", hash = "sha256:db0374d52a975f88a92a7f20de44e08ef5be9763ba7e99630baf16c46ac8dbf0", size = 391844, upload-time = "2026-05-05T09:17:48.521Z" }, ] [[package]] diff --git a/frontend/package.json b/frontend/package.json index 10d5c29..2f78a77 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -17,7 +17,7 @@ "@mantine/core": "^9.1.1", "@mantine/form": "^9.1.1", "@mantine/hooks": "^9.1.1", - "@tabler/icons-react": "^3.41.1", + "@tabler/icons-react": "^3.42.0", "@tanstack/query-async-storage-persister": 
"^5.100.9", "@tanstack/react-query": "^5.100.9", "@tanstack/react-query-persist-client": "^5.100.9", @@ -33,7 +33,7 @@ "devDependencies": { "@babel/core": "^7.29.0", "@eslint/js": "^9.39.4", - "@hey-api/openapi-ts": "0.97.0", + "@hey-api/openapi-ts": "0.97.1", "@rolldown/plugin-babel": "^0.2.3", "@tanstack/eslint-plugin-query": "^5.100.9", "@tanstack/react-query-devtools": "^5.100.9", @@ -52,12 +52,12 @@ "eslint-plugin-react-refresh": "^0.5.2", "eslint-plugin-tsdoc": "^0.5.2", "globals": "^17.6.0", - "postcss": "^8.5.13", + "postcss": "^8.5.14", "postcss-preset-mantine": "^1.18.0", "postcss-simple-vars": "^7.0.1", "prettier": "3.8.3", "typescript": "~5.9.3", - "typescript-eslint": "^8.59.1", + "typescript-eslint": "^8.59.2", "vite": "^8.0.10" }, "packageManager": "pnpm@10.33.2+sha512.a90faf6feeab71ad6c6e57f94e0fe1a12f5dcc22cd754db40ae9593eb6a3e0b6b12e3540218bb37ae083404b1f2ce6db2a4121e979829b4aff94b99f49da1cf8", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 633ab92..004580b 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -18,8 +18,8 @@ importers: specifier: ^9.1.1 version: 9.1.1(react@19.2.5) '@tabler/icons-react': - specifier: ^3.41.1 - version: 3.41.1(react@19.2.5) + specifier: ^3.42.0 + version: 3.42.0(react@19.2.5) '@tanstack/query-async-storage-persister': specifier: ^5.100.9 version: 5.100.9 @@ -61,11 +61,11 @@ importers: specifier: ^9.39.4 version: 9.39.4 '@hey-api/openapi-ts': - specifier: 0.97.0 - version: 0.97.0(typescript@5.9.3) + specifier: 0.97.1 + version: 0.97.1(typescript@5.9.3) '@rolldown/plugin-babel': specifier: ^0.2.3 - version: 0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)) + version: 0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3)) 
'@tanstack/eslint-plugin-query': specifier: ^5.100.9 version: 5.100.9(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) @@ -92,7 +92,7 @@ importers: version: 19.2.3(@types/react@19.2.14) '@vitejs/plugin-react': specifier: ^6.0.1 - version: 6.0.1(@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)))(babel-plugin-react-compiler@1.0.0)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)) + version: 6.0.1(@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3)))(babel-plugin-react-compiler@1.0.0)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3)) babel-plugin-react-compiler: specifier: ^1.0.0 version: 1.0.0 @@ -101,7 +101,7 @@ importers: version: 9.39.4(jiti@2.6.1) eslint-config-mantine: specifier: ^4.0.3 - version: 4.0.3(@eslint/js@9.39.4)(eslint-plugin-jsx-a11y@6.10.2(eslint@9.39.4(jiti@2.6.1)))(eslint-plugin-react@7.37.5(eslint@9.39.4(jiti@2.6.1)))(eslint@9.39.4(jiti@2.6.1))(typescript-eslint@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)) + version: 4.0.3(@eslint/js@9.39.4)(eslint-plugin-jsx-a11y@6.10.2(eslint@9.39.4(jiti@2.6.1)))(eslint-plugin-react@7.37.5(eslint@9.39.4(jiti@2.6.1)))(eslint@9.39.4(jiti@2.6.1))(typescript-eslint@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)) eslint-config-prettier: specifier: ^10.1.8 version: 10.1.8(eslint@9.39.4(jiti@2.6.1)) @@ -118,14 +118,14 @@ importers: specifier: ^17.6.0 version: 17.6.0 postcss: - specifier: ^8.5.13 - version: 8.5.13 + specifier: ^8.5.14 + version: 8.5.14 postcss-preset-mantine: specifier: ^1.18.0 - version: 1.18.0(postcss@8.5.13) + version: 1.18.0(postcss@8.5.14) postcss-simple-vars: specifier: ^7.0.1 - version: 
7.0.1(postcss@8.5.13) + version: 7.0.1(postcss@8.5.14) prettier: specifier: 3.8.3 version: 3.8.3 @@ -133,11 +133,11 @@ importers: specifier: ~5.9.3 version: 5.9.3 typescript-eslint: - specifier: ^8.59.1 - version: 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + specifier: ^8.59.2 + version: 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) vite: specifier: ^8.0.10 - version: 8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3) + version: 8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3) packages: @@ -440,19 +440,19 @@ packages: resolution: {integrity: sha512-Iciv2vUCJTW9lWM/ROvyZLblmcbYJHPuXfzb1SzeDVVn4xEXu2ilLU1pq3fn+09FZ/Y0P7VyvRE47UDU6om8xA==} engines: {node: '>=22.13.0'} - '@hey-api/json-schema-ref-parser@1.4.1': - resolution: {integrity: sha512-DoPJGxVApDlktP1yYLjmOrF0YBEqb32ieCbx1S1i09n8TyCgdoh4yQaQ3kp0sMTauH+bwNKPsFh7S8qiWCoKZA==} + '@hey-api/json-schema-ref-parser@1.4.2': + resolution: {integrity: sha512-ZhCFSKI2ipZHEbgmtUHdyddvRU3wJ4elgCfYUC7T7hZa4EivSrVflTQf2w+v3TuaYxR1Y2V2kq3otqTttrrK8Q==} engines: {node: '>=22.13.0'} - '@hey-api/openapi-ts@0.97.0': - resolution: {integrity: sha512-WZkKgrDlZpxKlDv2HkBCzaAYeuM+EtZKFmKGBv9/JblAKpX3JQTROi7PzlCZE3eisetRPSakbcRgn+LGyB7EiQ==} + '@hey-api/openapi-ts@0.97.1': + resolution: {integrity: sha512-LksUJeXAqwf6OhcCCr3/B4YjnBs5rqSqjDUKMBvkgp4OhaCQiJrOvntctFxdnugy8jUojP4yi/eJf5xYzcYzCQ==} engines: {node: '>=22.13.0'} hasBin: true peerDependencies: typescript: '>=5.5.3 || >=6.0.0 || 6.0.1-rc' - '@hey-api/shared@0.4.2': - resolution: {integrity: sha512-4fconS10E0Xr4/acV8G+BkApxaIStxrT0GhB9BDTQWvrFTy5/nV933SyFk8qImcbpKvgv9hpn3N+7bV8oFrbjA==} + '@hey-api/shared@0.4.3': + resolution: {integrity: sha512-3tHfZNXgGOt+3P3Kq9cvqmZ9i7e3jtrkip1uDpZTX1+hTNboHhYdjxnT8AbrDuvslTaQHoAOlP4/iCDdzd9Jag==} engines: {node: '>=22.13.0'} '@hey-api/spec-types@0.2.0': @@ -678,13 +678,13 @@ packages: '@standard-schema/utils@0.3.0': resolution: 
{integrity: sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==} - '@tabler/icons-react@3.41.1': - resolution: {integrity: sha512-kUgweE+DJtAlMZVIns1FTDdcbpRVnkK7ZpUOXmoxy3JAF0rSHj0TcP4VHF14+gMJGnF+psH2Zt26BLT6owetBA==} + '@tabler/icons-react@3.42.0': + resolution: {integrity: sha512-WvKhHYLdJaZbiY4Jm31fmTbzIwxokXcE1HM/m9rmXvh7UoHG4mM8n+9NOB6xEwB5SZQ+G/Z102eMj1F3NqDMVg==} peerDependencies: react: '>= 16' - '@tabler/icons@3.41.1': - resolution: {integrity: sha512-OaRnVbRmH2nHtFeg+RmMJ/7m2oBIF9XCJAUD5gQnMrpK9f05ydj8MZrAf3NZQqOXyxGN1UBL0D5IKLLEUfr74Q==} + '@tabler/icons@3.42.0': + resolution: {integrity: sha512-h0nFIRgwrE/9iVgN+GuLijbiLIBWJ3chNvIWhqUZhy4D9fv3tkoQ3EYFAvxvfdvQUNNVAhJhj+ar54y6t016Vg==} '@tanstack/eslint-plugin-query@5.100.9': resolution: {integrity: sha512-3jZwyxAZWSBqI7EXEdw+rktFfX1opMpqn9Lruwz52DEzQdi7kbKnqixjhR3dJ1xFfG05YxV9vsqXGxXqcLAmjA==} @@ -801,16 +801,16 @@ packages: '@types/use-sync-external-store@0.0.6': resolution: {integrity: sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==} - '@typescript-eslint/eslint-plugin@8.59.1': - resolution: {integrity: sha512-BOziFIfE+6osHO9FoJG4zjoHUcvI7fTNBSpdAwrNH0/TLvzjsk2oo8XSSOT2HhqUyhZPfHv4UOffoJ9oEEQ7Ag==} + '@typescript-eslint/eslint-plugin@8.59.2': + resolution: {integrity: sha512-j/bwmkBvHUtPNxzuWe5z6BEk3q54YRyGlBXkSsmfoih7zNrBvl5A9A98anlp/7JbyZcWIJ8KXo/3Tq/DjFLtuQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.59.1 + '@typescript-eslint/parser': ^8.59.2 eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.1.0' - '@typescript-eslint/parser@8.59.1': - resolution: {integrity: sha512-HDQH9O/47Dxi1ceDhBXdaldtf/WV9yRYMjbjCuNk3qnaTD564qwv61Y7+gTxwxRKzSrgO5uhtw584igXVuuZkA==} + '@typescript-eslint/parser@8.59.2': + resolution: {integrity: sha512-plR3pp6D+SSUn1HM7xvSkx12/DhoHInI2YF35KAcVFNZvlC0gtrWqx7Qq1oH2Ssgi0vlFRCTbP+DZc7B9+TtsQ==} engines: {node: ^18.18.0 
|| ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 @@ -822,8 +822,8 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/project-service@8.59.1': - resolution: {integrity: sha512-+MuHQlHiEr00Of/IQbE/MmEoi44znZHbR/Pz7Opq4HryUOlRi+/44dro9Ycy8Fyo+/024IWtw8m4JUMCGTYxDg==} + '@typescript-eslint/project-service@8.59.2': + resolution: {integrity: sha512-+2hqvEkeyf/0FBor67duF0Ll7Ot8jyKzDQOSrxazF/danillRq2DwR9dLptsXpoZQqxE1UisSmoZewrlPas9Vw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' @@ -832,8 +832,8 @@ packages: resolution: {integrity: sha512-YAi4VDKcIZp0O4tz/haYKhmIDZFEUPOreKbfdAN3SzUDMcPhJ8QI99xQXqX+HoUVq8cs85eRKnD+rne2UAnj2w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/scope-manager@8.59.1': - resolution: {integrity: sha512-LwuHQI4pDOYVKvmH2dkaJo6YZCSgouVgnS/z7yBPKBMvgtBvyLqiLy9Z6b7+m/TRcX1NFYUqZetI5Y+aT4GEfg==} + '@typescript-eslint/scope-manager@8.59.2': + resolution: {integrity: sha512-JzfyEpEtOU89CcFSwyNS3mu4MLvLSXqnmX05+aKBDM+TdR5jzcGOEBwxwGNxrEQ7p/z6kK2WyioCGBf2zZBnvg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@typescript-eslint/tsconfig-utils@8.56.1': @@ -842,14 +842,14 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/tsconfig-utils@8.59.1': - resolution: {integrity: sha512-/0nEyPbX7gRsk0Uwfe4ALwwgxuA66d/l2mhRDNlAvaj4U3juhUtJNq0DsY8M2AYwwb9rEq2hrC3IcIcEt++iJA==} + '@typescript-eslint/tsconfig-utils@8.59.2': + resolution: {integrity: sha512-BKK4alN7oi4C/zv4VqHQ+uRU+lTa6JGIZ7s1juw7b3RHo9OfKB+bKX3u0iVZetdsUCBBkSbdWbarJbmN0fTeSw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' - '@typescript-eslint/type-utils@8.59.1': - resolution: {integrity: sha512-klWPBR2ciQHS3f++ug/mVnWKPjBUo7icEL3FAO1lhAR1Z1i5NQYZ1EannMSRYcq5qCv5wNALlXr6fksRHyYl7w==} + '@typescript-eslint/type-utils@8.59.2': + resolution: {integrity: 
sha512-nhqaj1nmTdVVl/BP5omXNRGO38jn5iosis2vbdmupF2txCf8ylWT8lx+JlvMYYVqzGVKtjojUFoQ3JRWK+mfzQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 @@ -859,8 +859,8 @@ packages: resolution: {integrity: sha512-dbMkdIUkIkchgGDIv7KLUpa0Mda4IYjo4IAMJUZ+3xNoUXxMsk9YtKpTHSChRS85o+H9ftm51gsK1dZReY9CVw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/types@8.59.1': - resolution: {integrity: sha512-ZDCjgccSdYPw5Bxh+my4Z0lJU96ZDN7jbBzvmEn0FZx3RtU1C7VWl6NbDx94bwY3V5YsgwRzJPOgeY2Q/nLG8A==} + '@typescript-eslint/types@8.59.2': + resolution: {integrity: sha512-e82GVOE8Ps3E++Egvb6Y3Dw0S10u8NkQ9KXmtRhCWJJ8kDhOJTvtMAWnFL16kB1583goCWXsr0NieKCZMs2/0Q==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@typescript-eslint/typescript-estree@8.56.1': @@ -869,8 +869,8 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/typescript-estree@8.59.1': - resolution: {integrity: sha512-OUd+vJS05sSkOip+BkZ/2NS8RMxrAAJemsC6vU3kmfLyeaJT0TftHkV9mcx2107MmsBVXXexhVu4F0TZXyMl4g==} + '@typescript-eslint/typescript-estree@8.59.2': + resolution: {integrity: sha512-o0XPGNwcWw+FIwStOWn+BwBuEmL6QXP0rsvAFg7ET1dey1Nr6Wb1ac8p5HEsK0ygO/6mUxlk+YWQD9xcb/nnXg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' @@ -882,8 +882,8 @@ packages: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.59.1': - resolution: {integrity: sha512-3pIeoXhCeYH9FSCBI8P3iNwJlGuzPlYKkTlen2O9T1DSeeg8UG8jstq6BLk+Mda0qup7mgk4z4XL4OzRaxZ8LA==} + '@typescript-eslint/utils@8.59.2': + resolution: {integrity: sha512-Juw3EinkXqjaffxz6roowvV7GZT/kET5vSKKZT6upl5TXdWkLkYmNPXwDDL2Vkt2DPn0nODIS4egC/0AGxKo/Q==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 @@ -893,8 +893,8 @@ packages: resolution: {integrity: 
sha512-KiROIzYdEV85YygXw6BI/Dx4fnBlFQu6Mq4QE4MOH9fFnhohw6wX/OAvDY2/C+ut0I3RSPKenvZJIVYqJNkhEw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/visitor-keys@8.59.1': - resolution: {integrity: sha512-LdDNl6C5iJExcM0Yh0PwAIBb9PrSiCsWamF/JyEZawm3kFDnRoaq3LGE4bpyRao/fWeGKKyw7icx0YxrLFC5Cg==} + '@typescript-eslint/visitor-keys@8.59.2': + resolution: {integrity: sha512-NwjLUnGy8/Zfx23fl50tRC8rYaYnM52xNRYFAXvmiil9yh1+K6aRVQMnzW6gQB/1DLgWt977lYQn7C+wtgXZiA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@vitejs/plugin-react@6.0.1': @@ -1387,8 +1387,8 @@ packages: fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} - fast-uri@3.1.1: - resolution: {integrity: sha512-h2r7rcm6Ee/J8o0LD5djLuFVcfbZxhvho4vvsbeV0aMvXjUgqv4YpxpkEx0d68l6+IleVfLAdVEfhR7QNMkGHQ==} + fast-uri@3.1.2: + resolution: {integrity: sha512-rVjf7ArG3LTk+FS6Yw81V1DLuZl1bRbNrev6Tmd/9RaroeeRRJhAt7jg/6YFxbvAQXUCavSoZhPPj6oOx+5KjQ==} fdir@6.5.0: resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} @@ -1541,8 +1541,8 @@ packages: immer@10.2.0: resolution: {integrity: sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==} - immer@11.1.4: - resolution: {integrity: sha512-XREFCPo6ksxVzP4E0ekD5aMdf8WMwmdNaz6vuvxgI40UaEiu6q3p8X52aU6GdyvLY3XXX/8R7JOTXStz/nBbRw==} + immer@11.1.6: + resolution: {integrity: sha512-uwrF08UBQfxk49i9WcUeCx045wjB1zXEHNJmbYHPVVspxmjwSeWCoKbB8DEIvs3XkBJV6lcRAyLaWJ2+u3MMCw==} import-fresh@3.3.1: resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} @@ -1992,8 +1992,8 @@ packages: peerDependencies: postcss: ^8.2.1 - postcss@8.5.13: - resolution: {integrity: sha512-qif0+jGGZoLWdHey3UFHHWP0H7Gbmsk8T5VEqyYFbWqPr1XqvLGBbk/sl8V5exGmcYJklJOhOQq1pV9IcsiFag==} + postcss@8.5.14: + resolution: {integrity: 
sha512-SoSL4+OSEtR99LHFZQiJLkT59C5B1amGO1NzTwj7TT1qCUgUO6hxOvzkOYxD+vMrXBM3XJIKzokoERdqQq/Zmg==} engines: {node: ^10 || ^12 || >=14} powershell-utils@0.1.0: @@ -2310,8 +2310,8 @@ packages: resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==} engines: {node: '>= 0.4'} - typescript-eslint@8.59.1: - resolution: {integrity: sha512-xqDcFVBmlrltH64lklOVp1wYxgJr6LVdg3NamBgH2OOQDLFdTKfIZXF5PfghrnXQKXZGTQs8tr1vL7fJvq8CTQ==} + typescript-eslint@8.59.2: + resolution: {integrity: sha512-pJw051uomb3ZeCzGTpRb8RbEqB5Y4WWet8gl/GcTlU35BSx0PVdZ86/bqkQCyKKuraVQEK7r6kBHQXF+fBhkoQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 @@ -2741,17 +2741,17 @@ snapshots: transitivePeerDependencies: - magicast - '@hey-api/json-schema-ref-parser@1.4.1': + '@hey-api/json-schema-ref-parser@1.4.2': dependencies: '@jsdevtools/ono': 7.1.3 '@types/json-schema': 7.0.15 - yaml: 2.8.3 + js-yaml: 4.1.1 - '@hey-api/openapi-ts@0.97.0(typescript@5.9.3)': + '@hey-api/openapi-ts@0.97.1(typescript@5.9.3)': dependencies: '@hey-api/codegen-core': 0.8.1 - '@hey-api/json-schema-ref-parser': 1.4.1 - '@hey-api/shared': 0.4.2 + '@hey-api/json-schema-ref-parser': 1.4.2 + '@hey-api/shared': 0.4.3 '@hey-api/spec-types': 0.2.0 '@hey-api/types': 0.1.4 '@lukeed/ms': 2.0.2 @@ -2763,10 +2763,10 @@ snapshots: transitivePeerDependencies: - magicast - '@hey-api/shared@0.4.2': + '@hey-api/shared@0.4.3': dependencies: '@hey-api/codegen-core': 0.8.1 - '@hey-api/json-schema-ref-parser': 1.4.1 + '@hey-api/json-schema-ref-parser': 1.4.2 '@hey-api/spec-types': 0.2.0 '@hey-api/types': 0.1.4 ansi-colors: 4.1.3 @@ -2873,7 +2873,7 @@ snapshots: dependencies: '@standard-schema/spec': 1.1.0 '@standard-schema/utils': 0.3.0 - immer: 11.1.4 + immer: 11.1.6 redux: 5.0.1 redux-thunk: 3.1.0(redux@5.0.1) reselect: 5.1.1 @@ -2930,14 +2930,14 @@ snapshots: '@rolldown/binding-win32-x64-msvc@1.0.0-rc.17': optional: true - 
'@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3))': + '@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3))': dependencies: '@babel/core': 7.29.0 picomatch: 4.0.4 rolldown: 1.0.0-rc.17 optionalDependencies: '@babel/runtime': 7.29.2 - vite: 8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3) + vite: 8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3) '@rolldown/pluginutils@1.0.0-rc.17': {} @@ -2947,16 +2947,16 @@ snapshots: '@standard-schema/utils@0.3.0': {} - '@tabler/icons-react@3.41.1(react@19.2.5)': + '@tabler/icons-react@3.42.0(react@19.2.5)': dependencies: - '@tabler/icons': 3.41.1 + '@tabler/icons': 3.42.0 react: 19.2.5 - '@tabler/icons@3.41.1': {} + '@tabler/icons@3.42.0': {} '@tanstack/eslint-plugin-query@5.100.9(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.4(jiti@2.6.1) optionalDependencies: typescript: 5.9.3 @@ -3086,14 +3086,14 @@ snapshots: '@types/use-sync-external-store@0.0.6': {} - '@typescript-eslint/eslint-plugin@8.59.1(@typescript-eslint/parser@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/eslint-plugin@8.59.2(@typescript-eslint/parser@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/regexpp': 4.12.2 - '@typescript-eslint/parser': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/scope-manager': 
8.59.1 - '@typescript-eslint/type-utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.59.1 + '@typescript-eslint/parser': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.59.2 + '@typescript-eslint/type-utils': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.59.2 eslint: 9.39.4(jiti@2.6.1) ignore: 7.0.5 natural-compare: 1.4.0 @@ -3102,12 +3102,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/parser@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/scope-manager': 8.59.1 - '@typescript-eslint/types': 8.59.1 - '@typescript-eslint/typescript-estree': 8.59.1(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.59.1 + '@typescript-eslint/scope-manager': 8.59.2 + '@typescript-eslint/types': 8.59.2 + '@typescript-eslint/typescript-estree': 8.59.2(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.59.2 debug: 4.4.3 eslint: 9.39.4(jiti@2.6.1) typescript: 5.9.3 @@ -3123,10 +3123,10 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/project-service@8.59.1(typescript@5.9.3)': + '@typescript-eslint/project-service@8.59.2(typescript@5.9.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.59.1(typescript@5.9.3) - '@typescript-eslint/types': 8.59.1 + '@typescript-eslint/tsconfig-utils': 8.59.2(typescript@5.9.3) + '@typescript-eslint/types': 8.59.2 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -3137,24 +3137,24 @@ snapshots: '@typescript-eslint/types': 8.56.1 '@typescript-eslint/visitor-keys': 8.56.1 - '@typescript-eslint/scope-manager@8.59.1': + 
'@typescript-eslint/scope-manager@8.59.2': dependencies: - '@typescript-eslint/types': 8.59.1 - '@typescript-eslint/visitor-keys': 8.59.1 + '@typescript-eslint/types': 8.59.2 + '@typescript-eslint/visitor-keys': 8.59.2 '@typescript-eslint/tsconfig-utils@8.56.1(typescript@5.9.3)': dependencies: typescript: 5.9.3 - '@typescript-eslint/tsconfig-utils@8.59.1(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.59.2(typescript@5.9.3)': dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/type-utils@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.59.1 - '@typescript-eslint/typescript-estree': 8.59.1(typescript@5.9.3) - '@typescript-eslint/utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/types': 8.59.2 + '@typescript-eslint/typescript-estree': 8.59.2(typescript@5.9.3) + '@typescript-eslint/utils': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) debug: 4.4.3 eslint: 9.39.4(jiti@2.6.1) ts-api-utils: 2.5.0(typescript@5.9.3) @@ -3164,7 +3164,7 @@ snapshots: '@typescript-eslint/types@8.56.1': {} - '@typescript-eslint/types@8.59.1': {} + '@typescript-eslint/types@8.59.2': {} '@typescript-eslint/typescript-estree@8.56.1(typescript@5.9.3)': dependencies: @@ -3181,12 +3181,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/typescript-estree@8.59.1(typescript@5.9.3)': + '@typescript-eslint/typescript-estree@8.59.2(typescript@5.9.3)': dependencies: - '@typescript-eslint/project-service': 8.59.1(typescript@5.9.3) - '@typescript-eslint/tsconfig-utils': 8.59.1(typescript@5.9.3) - '@typescript-eslint/types': 8.59.1 - '@typescript-eslint/visitor-keys': 8.59.1 + '@typescript-eslint/project-service': 8.59.2(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.59.2(typescript@5.9.3) + '@typescript-eslint/types': 8.59.2 + '@typescript-eslint/visitor-keys': 8.59.2 debug: 
4.4.3 minimatch: 10.2.5 semver: 7.7.4 @@ -3207,12 +3207,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/utils@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.4(jiti@2.6.1)) - '@typescript-eslint/scope-manager': 8.59.1 - '@typescript-eslint/types': 8.59.1 - '@typescript-eslint/typescript-estree': 8.59.1(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.59.2 + '@typescript-eslint/types': 8.59.2 + '@typescript-eslint/typescript-estree': 8.59.2(typescript@5.9.3) eslint: 9.39.4(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: @@ -3223,17 +3223,17 @@ snapshots: '@typescript-eslint/types': 8.56.1 eslint-visitor-keys: 5.0.1 - '@typescript-eslint/visitor-keys@8.59.1': + '@typescript-eslint/visitor-keys@8.59.2': dependencies: - '@typescript-eslint/types': 8.59.1 + '@typescript-eslint/types': 8.59.2 eslint-visitor-keys: 5.0.1 - '@vitejs/plugin-react@6.0.1(@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)))(babel-plugin-react-compiler@1.0.0)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3))': + '@vitejs/plugin-react@6.0.1(@rolldown/plugin-babel@0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3)))(babel-plugin-react-compiler@1.0.0)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3))': dependencies: '@rolldown/pluginutils': 1.0.0-rc.7 - vite: 8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3) + vite: 
8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3) optionalDependencies: - '@rolldown/plugin-babel': 0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3)) + '@rolldown/plugin-babel': 0.2.3(@babel/core@7.29.0)(@babel/runtime@7.29.2)(rolldown@1.0.0-rc.17)(vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3)) babel-plugin-react-compiler: 1.0.0 acorn-jsx@5.3.2(acorn@8.16.0): @@ -3252,7 +3252,7 @@ snapshots: ajv@8.18.0: dependencies: fast-deep-equal: 3.1.3 - fast-uri: 3.1.1 + fast-uri: 3.1.2 json-schema-traverse: 1.0.0 require-from-string: 2.0.2 @@ -3705,13 +3705,13 @@ snapshots: escape-string-regexp@4.0.0: {} - eslint-config-mantine@4.0.3(@eslint/js@9.39.4)(eslint-plugin-jsx-a11y@6.10.2(eslint@9.39.4(jiti@2.6.1)))(eslint-plugin-react@7.37.5(eslint@9.39.4(jiti@2.6.1)))(eslint@9.39.4(jiti@2.6.1))(typescript-eslint@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)): + eslint-config-mantine@4.0.3(@eslint/js@9.39.4)(eslint-plugin-jsx-a11y@6.10.2(eslint@9.39.4(jiti@2.6.1)))(eslint-plugin-react@7.37.5(eslint@9.39.4(jiti@2.6.1)))(eslint@9.39.4(jiti@2.6.1))(typescript-eslint@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3)): dependencies: '@eslint/js': 9.39.4 eslint: 9.39.4(jiti@2.6.1) eslint-plugin-jsx-a11y: 6.10.2(eslint@9.39.4(jiti@2.6.1)) eslint-plugin-react: 7.37.5(eslint@9.39.4(jiti@2.6.1)) - typescript-eslint: 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + typescript-eslint: 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) eslint-config-prettier@10.1.8(eslint@9.39.4(jiti@2.6.1)): dependencies: @@ -3863,7 +3863,7 @@ snapshots: fast-levenshtein@2.0.6: {} - fast-uri@3.1.1: {} + fast-uri@3.1.2: {} fdir@6.5.0(picomatch@4.0.4): optionalDependencies: @@ -4000,7 +4000,7 @@ snapshots: immer@10.2.0: {} - immer@11.1.4: {} + immer@11.1.6: {} 
import-fresh@3.3.1: dependencies: @@ -4390,40 +4390,40 @@ snapshots: possible-typed-array-names@1.1.0: {} - postcss-js@4.1.0(postcss@8.5.13): + postcss-js@4.1.0(postcss@8.5.14): dependencies: camelcase-css: 2.0.1 - postcss: 8.5.13 + postcss: 8.5.14 - postcss-mixins@12.1.2(postcss@8.5.13): + postcss-mixins@12.1.2(postcss@8.5.14): dependencies: - postcss: 8.5.13 - postcss-js: 4.1.0(postcss@8.5.13) - postcss-simple-vars: 7.0.1(postcss@8.5.13) - sugarss: 5.0.1(postcss@8.5.13) + postcss: 8.5.14 + postcss-js: 4.1.0(postcss@8.5.14) + postcss-simple-vars: 7.0.1(postcss@8.5.14) + sugarss: 5.0.1(postcss@8.5.14) tinyglobby: 0.2.16 - postcss-nested@7.0.2(postcss@8.5.13): + postcss-nested@7.0.2(postcss@8.5.14): dependencies: - postcss: 8.5.13 + postcss: 8.5.14 postcss-selector-parser: 7.1.1 - postcss-preset-mantine@1.18.0(postcss@8.5.13): + postcss-preset-mantine@1.18.0(postcss@8.5.14): dependencies: - postcss: 8.5.13 - postcss-mixins: 12.1.2(postcss@8.5.13) - postcss-nested: 7.0.2(postcss@8.5.13) + postcss: 8.5.14 + postcss-mixins: 12.1.2(postcss@8.5.14) + postcss-nested: 7.0.2(postcss@8.5.14) postcss-selector-parser@7.1.1: dependencies: cssesc: 3.0.0 util-deprecate: 1.0.2 - postcss-simple-vars@7.0.1(postcss@8.5.13): + postcss-simple-vars@7.0.1(postcss@8.5.14): dependencies: - postcss: 8.5.13 + postcss: 8.5.14 - postcss@8.5.13: + postcss@8.5.14: dependencies: nanoid: 3.3.12 picocolors: 1.1.1 @@ -4744,9 +4744,9 @@ snapshots: strip-json-comments@3.1.1: {} - sugarss@5.0.1(postcss@8.5.13): + sugarss@5.0.1(postcss@8.5.14): dependencies: - postcss: 8.5.13 + postcss: 8.5.14 supports-color@7.2.0: dependencies: @@ -4812,12 +4812,12 @@ snapshots: possible-typed-array-names: 1.1.0 reflect.getprototypeof: 1.0.10 - typescript-eslint@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3): + typescript-eslint@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/eslint-plugin': 
8.59.1(@typescript-eslint/parser@8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/parser': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/typescript-estree': 8.59.1(typescript@5.9.3) - '@typescript-eslint/utils': 8.59.1(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.59.2(@typescript-eslint/parser@8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/parser': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/typescript-estree': 8.59.2(typescript@5.9.3) + '@typescript-eslint/utils': 8.59.2(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.4(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: @@ -4882,11 +4882,11 @@ snapshots: d3-time: 3.1.0 d3-timer: 3.0.1 - vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.13))(yaml@2.8.3): + vite@8.0.10(@types/node@24.12.2)(esbuild@0.27.4)(jiti@2.6.1)(sugarss@5.0.1(postcss@8.5.14))(yaml@2.8.3): dependencies: lightningcss: 1.32.0 picomatch: 4.0.4 - postcss: 8.5.13 + postcss: 8.5.14 rolldown: 1.0.0-rc.17 tinyglobby: 0.2.16 optionalDependencies: @@ -4894,7 +4894,7 @@ snapshots: esbuild: 0.27.4 fsevents: 2.3.3 jiti: 2.6.1 - sugarss: 5.0.1(postcss@8.5.13) + sugarss: 5.0.1(postcss@8.5.14) yaml: 2.8.3 which-boxed-primitive@1.1.1: @@ -4951,7 +4951,8 @@ snapshots: yallist@3.1.1: {} - yaml@2.8.3: {} + yaml@2.8.3: + optional: true yocto-queue@0.1.0: {} From 3c9185636b8f7387784186d3f3b4ac11a866025b Mon Sep 17 00:00:00 2001 From: Martin Kedmenec Date: Tue, 5 May 2026 14:28:58 +0200 Subject: [PATCH 7/7] Fix evaluation scripts, move export_openapi.py to scripts/, improve Makefile --- Makefile | 6 +- backend/pyproject.toml | 2 +- backend/scripts/__init__.py | 1 + backend/{ => scripts}/export_openapi.py | 0 backend/scripts/plot_evaluation.py | 320 ++++++++++----- 
backend/scripts/run_evaluation.py | 507 ++++++++++++++++-------- 6 files changed, 567 insertions(+), 269 deletions(-) create mode 100644 backend/scripts/__init__.py rename backend/{ => scripts}/export_openapi.py (100%) diff --git a/Makefile b/Makefile index 0d19f54..576b11e 100644 --- a/Makefile +++ b/Makefile @@ -60,13 +60,13 @@ frontend: cd $(FRONTEND_DIR) && pnpm dev export-schema: - cd $(BACKEND_DIR) && uv run python export_openapi.py + cd $(BACKEND_DIR) && uv run python -m scripts.export_openapi generate-client: $(OPENAPI_JSON) cd $(FRONTEND_DIR) && pnpm generate-client -$(OPENAPI_JSON): $(BACKEND_DIR)/app/main.py $(BACKEND_DIR)/export_openapi.py - cd $(BACKEND_DIR) && uv run python export_openapi.py +$(OPENAPI_JSON): $(BACKEND_DIR)/app/main.py $(BACKEND_DIR)/scripts/export_openapi.py + cd $(BACKEND_DIR) && uv run python -m scripts.export_openapi openapi: export-schema generate-client diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 07627bb..7d7ffc0 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -51,7 +51,7 @@ convention = "google" [tool.basedpyright] typeCheckingMode = "all" -include = ["app", "tests", "export_openapi.py"] +include = ["app", "tests", "scripts"] exclude = ["notebooks"] venvPath = "." 
venv = ".venv" diff --git a/backend/scripts/__init__.py b/backend/scripts/__init__.py new file mode 100644 index 0000000..ab98ff5 --- /dev/null +++ b/backend/scripts/__init__.py @@ -0,0 +1 @@ +"""Evaluation scripts.""" diff --git a/backend/export_openapi.py b/backend/scripts/export_openapi.py similarity index 100% rename from backend/export_openapi.py rename to backend/scripts/export_openapi.py diff --git a/backend/scripts/plot_evaluation.py b/backend/scripts/plot_evaluation.py index f63aabc..121099d 100644 --- a/backend/scripts/plot_evaluation.py +++ b/backend/scripts/plot_evaluation.py @@ -2,62 +2,161 @@ Run from the backend folder after run_evaluation.py: - uv run python scripts/plot_evaluation.py --input evaluation-output --figures ../paper/figures + uv run python scripts/plot_evaluation.py --input evaluation-output \ + --figures ../paper/figures The script writes summary CSV files and vector PDF figures. """ -from __future__ import annotations +# ruff: noqa: T201 import argparse import csv import math import statistics from collections import defaultdict +from collections.abc import Mapping, Sequence +from dataclasses import dataclass from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING, cast -import matplotlib.pyplot as plt +if TYPE_CHECKING: + from typing import Protocol + class _Pyplot(Protocol): + def figure(self, *, figsize: tuple[float, float]) -> object: ... -def parse_args() -> argparse.Namespace: + def boxplot( + self, + x: Sequence[Sequence[float]], + *, + tick_labels: Sequence[str], + showfliers: bool, + ) -> object: ... + + def ylabel(self, ylabel: str) -> object: ... + + def yscale(self, value: str) -> None: ... + + def tight_layout(self) -> None: ... + + def savefig(self, fname: Path) -> None: ... + + def close(self) -> None: ... + + def scatter( + self, + x: Sequence[float], + y: Sequence[float], + **kwargs: object, + ) -> object: ... + + def axhline(self, y: float, *, linewidth: float) -> object: ... 
+ + def axvline(self, x: float, *, linewidth: float) -> object: ... + + def xlabel(self, xlabel: str) -> object: ... + + def legend(self, *, frameon: bool) -> object: ... + + def bar(self, x: Sequence[str], height: Sequence[float]) -> object: ... + + def ylim(self, bottom: float, top: float) -> tuple[float, float]: ... + + def hist(self, x: Sequence[float], *, bins: int) -> object: ... + + plt: _Pyplot +else: + import matplotlib.pyplot as plt + + +type _CsvValue = str | int | float | bool +type _CsvInputRow = Mapping[str, str] +type _CsvRow = Mapping[str, _CsvValue] +type _CsvOutputRow = dict[str, _CsvValue] + + +@dataclass(frozen=True, slots=True) +class _PlotEvaluationArgs: + input: Path + figures: Path + + +def _parse_path(value: object) -> Path: + if isinstance(value, Path): + return value + + return Path(str(value)) + + +def _parse_args() -> _PlotEvaluationArgs: parser = argparse.ArgumentParser() _ = parser.add_argument("--input", type=Path, default=Path("evaluation-output")) _ = parser.add_argument( "--figures", type=Path, default=Path("evaluation-output/figures") ) - return parser.parse_args() + values = cast("Mapping[str, object]", vars(parser.parse_args())) + + return _PlotEvaluationArgs( + input=_parse_path(values["input"]), + figures=_parse_path(values["figures"]), + ) -def read_csv(path: Path) -> list[dict[str, str]]: +def _read_csv(path: Path) -> list[dict[str, str]]: + rows: list[dict[str, str]] = [] + with path.open(newline="", encoding="utf-8") as file: - return list(csv.DictReader(file)) + for raw_row in csv.DictReader(file): + row = { + key: value + for key, value in raw_row.items() + if key is not None and isinstance(value, str) + } + rows.append(row) + + return rows + + +def _as_text(row: _CsvRow, key: str) -> str: + value = row.get(key, "") + + if isinstance(value, str): + return value + + return str(value) -def as_float(row: dict[str, str], key: str, default: float = math.nan) -> float: +def _as_float(row: _CsvRow, key: str, default: float = 
math.nan) -> float: value = row.get(key, "") + if value == "": return default + return float(value) -def percentile(values: list[float], q: float) -> float: +def _percentile(values: Sequence[float], q: float) -> float: if not values: return math.nan + sorted_values = sorted(values) index = (len(sorted_values) - 1) * q lower = math.floor(index) upper = math.ceil(index) + if lower == upper: return sorted_values[int(index)] + return sorted_values[lower] * (upper - index) + sorted_values[upper] * ( index - lower ) -def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: +def _write_csv(path: Path, rows: Sequence[_CsvRow]) -> None: if not rows: return + path.parent.mkdir(parents=True, exist_ok=True) with path.open("w", newline="", encoding="utf-8") as file: writer = csv.DictWriter(file, fieldnames=list(rows[0].keys())) @@ -65,36 +164,40 @@ def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: writer.writerows(rows) -def shortest_by_pair(routes: list[dict[str, str]]) -> dict[str, dict[str, str]]: - result = {} +def _shortest_by_pair(routes: Sequence[_CsvInputRow]) -> dict[str, _CsvInputRow]: + result: dict[str, _CsvInputRow] = {} + for row in routes: if row["method"] == "shortest" and row["route_index"] == "0": result[row["pair_id"]] = row + return result -def add_quality_fields(routes: list[dict[str, str]]) -> list[dict[str, Any]]: - shortest = shortest_by_pair(routes) - enriched = [] +def _add_quality_fields(routes: Sequence[_CsvInputRow]) -> list[_CsvOutputRow]: + shortest = _shortest_by_pair(routes) + enriched: list[_CsvOutputRow] = [] + for row in routes: base = shortest.get(row["pair_id"]) if base is None: continue - distance = as_float(row, "distance_m") - base_distance = as_float(base, "distance_m") + + distance = _as_float(row, "distance_m") + base_distance = _as_float(base, "distance_m") distance_overhead_pct = ( ((distance / base_distance) - 1.0) * 100.0 if base_distance > 0 else math.nan ) - score_gains = [ - as_float(row, "snow_free_score") - 
as_float(base, "snow_free_score"), - as_float(row, "flat_score") - as_float(base, "flat_score"), - as_float(row, "scenic_score") - as_float(base, "scenic_score"), - ] - enriched.append( + score_gains = ( + _as_float(row, "snow_free_score") - _as_float(base, "snow_free_score"), + _as_float(row, "flat_score") - _as_float(base, "flat_score"), + _as_float(row, "scenic_score") - _as_float(base, "scenic_score"), + ) + enriched_row = cast("_CsvOutputRow", dict(row)) + enriched_row.update( { - **row, "distance_overhead_pct": distance_overhead_pct, "max_score_gain_pp": max(score_gains), "snow_free_gain_pp": score_gains[0], @@ -102,24 +205,27 @@ def add_quality_fields(routes: list[dict[str, str]]) -> list[dict[str, Any]]: "scenic_gain_pp": score_gains[2], } ) + enriched.append(enriched_row) + return enriched -def summarize_runs(runs: list[dict[str, str]]) -> list[dict[str, Any]]: - by_method: dict[str, list[dict[str, str]]] = defaultdict(list) +def _summarize_runs(runs: Sequence[_CsvInputRow]) -> list[_CsvOutputRow]: + by_method: defaultdict[str, list[_CsvInputRow]] = defaultdict(list) + for row in runs: if row["success"] == "True": by_method[row["method"]].append(row) - rows = [] + rows: list[_CsvOutputRow] = [] for method, group in sorted(by_method.items()): - runtimes = [as_float(row, "runtime_ms") for row in group] - route_counts = [as_float(row, "route_count") for row in group] + runtimes = [_as_float(row, "runtime_ms") for row in group] + route_counts = [_as_float(row, "route_count") for row in group] total_labels = [ - as_float(row, "total_labels") for row in group if row["total_labels"] + _as_float(row, "total_labels") for row in group if row["total_labels"] ] destination_labels = [ - as_float(row, "destination_labels") + _as_float(row, "destination_labels") for row in group if row["destination_labels"] ] @@ -133,7 +239,7 @@ def summarize_runs(runs: list[dict[str, str]]) -> list[dict[str, Any]]: "method": method, "requests": len(group), "median_runtime_ms": 
round(statistics.median(runtimes), 2), - "p95_runtime_ms": round(percentile(runtimes, 0.95), 2), + "p95_runtime_ms": round(_percentile(runtimes, 0.95), 2), "max_runtime_ms": round(max(runtimes), 2), "median_route_count": round(statistics.median(route_counts), 2), "median_total_labels": round(statistics.median(total_labels), 2) @@ -149,24 +255,28 @@ def summarize_runs(runs: list[dict[str, str]]) -> list[dict[str, Any]]: else "", } ) + return rows -def summarize_quality( - enriched: list[dict[str, Any]], *, include_neutral: bool -) -> list[dict[str, Any]]: - by_method: dict[str, list[dict[str, Any]]] = defaultdict(list) +def _summarize_quality( + enriched: Sequence[_CsvRow], *, include_neutral: bool +) -> list[_CsvOutputRow]: + by_method: defaultdict[str, list[_CsvRow]] = defaultdict(list) + for row in enriched: - if row["method"] == "shortest": + method = _as_text(row, "method") + profile = _as_text(row, "profile") + if method == "shortest": continue - if not include_neutral and row["profile"] == "neutral": + if not include_neutral and profile == "neutral": continue - by_method[row["method"]].append(row) + by_method[method].append(row) - rows = [] + rows: list[_CsvOutputRow] = [] for method, group in sorted(by_method.items()): - overhead = [float(row["distance_overhead_pct"]) for row in group] - gain = [float(row["max_score_gain_pp"]) for row in group] + overhead = [_as_float(row, "distance_overhead_pct") for row in group] + gain = [_as_float(row, "max_score_gain_pp") for row in group] rows.append( { "method": method, @@ -175,50 +285,56 @@ def summarize_quality( else "preference_profiles", "route_rows": len(group), "median_distance_overhead_pct": round(statistics.median(overhead), 2), - "p95_distance_overhead_pct": round(percentile(overhead, 0.95), 2), + "p95_distance_overhead_pct": round(_percentile(overhead, 0.95), 2), "median_max_score_gain_pp": round(statistics.median(gain), 2), - "p95_max_score_gain_pp": round(percentile(gain, 0.95), 2), + 
"p95_max_score_gain_pp": round(_percentile(gain, 0.95), 2), } ) + return rows -def summarize_quality_by_profile( - enriched: list[dict[str, Any]], -) -> list[dict[str, Any]]: - by_group: dict[tuple[str, str], list[dict[str, Any]]] = defaultdict(list) +def _summarize_quality_by_profile( + enriched: Sequence[_CsvRow], +) -> list[_CsvOutputRow]: + by_group: defaultdict[tuple[str, str], list[_CsvRow]] = defaultdict(list) + for row in enriched: - if row["method"] != "shortest": - by_group[(row["method"], row["profile"])].append(row) + method = _as_text(row, "method") + profile = _as_text(row, "profile") + if method != "shortest": + by_group[(method, profile)].append(row) - rows = [] + rows: list[_CsvOutputRow] = [] for (method, profile), group in sorted(by_group.items()): - overhead = [float(row["distance_overhead_pct"]) for row in group] - gain = [float(row["max_score_gain_pp"]) for row in group] + overhead = [_as_float(row, "distance_overhead_pct") for row in group] + gain = [_as_float(row, "max_score_gain_pp") for row in group] rows.append( { "method": method, "profile": profile, "route_rows": len(group), "median_distance_overhead_pct": round(statistics.median(overhead), 2), - "p95_distance_overhead_pct": round(percentile(overhead, 0.95), 2), + "p95_distance_overhead_pct": round(_percentile(overhead, 0.95), 2), "median_max_score_gain_pp": round(statistics.median(gain), 2), - "p95_max_score_gain_pp": round(percentile(gain, 0.95), 2), + "p95_max_score_gain_pp": round(_percentile(gain, 0.95), 2), } ) + return rows -def summarize_sensitivity(routes: list[dict[str, str]]) -> list[dict[str, Any]]: +def _summarize_sensitivity(routes: Sequence[_CsvInputRow]) -> list[_CsvOutputRow]: # Top route only, because this is what the UI recommends by default. 
- groups: dict[tuple[str, str], dict[str, str]] = defaultdict(dict) + groups: defaultdict[tuple[str, str], dict[str, str]] = defaultdict(dict) + for row in routes: if row["method"] == "shortest" or row["route_index"] != "0": continue groups[(row["method"], row["pair_id"])][row["profile"]] = row["signature"] - by_method: dict[str, list[int]] = defaultdict(list) - changed_from_neutral: dict[str, list[int]] = defaultdict(list) + by_method: defaultdict[str, list[int]] = defaultdict(list) + changed_from_neutral: defaultdict[str, list[int]] = defaultdict(list) for (method, _pair_id), signatures_by_profile in groups.items(): signatures = set(signatures_by_profile.values()) by_method[method].append(len(signatures)) @@ -231,7 +347,7 @@ def summarize_sensitivity(routes: list[dict[str, str]]) -> list[dict[str, Any]]: ) changed_from_neutral[method].append(1 if changed else 0) - rows = [] + rows: list[_CsvOutputRow] = [] for method in sorted(by_method): unique_counts = by_method[method] changed_counts = changed_from_neutral[method] @@ -248,14 +364,15 @@ def summarize_sensitivity(routes: list[dict[str, str]]) -> list[dict[str, Any]]: else "", } ) + return rows -def plot_runtime(runs: list[dict[str, str]], output: Path) -> None: +def _plot_runtime(runs: Sequence[_CsvInputRow], output: Path) -> None: methods = ["shortest", "weighted", "pareto"] data = [ [ - as_float(row, "runtime_ms") + _as_float(row, "runtime_ms") for row in runs if row["method"] == method and row["success"] == "True" ] @@ -270,24 +387,27 @@ def plot_runtime(runs: list[dict[str, str]], output: Path) -> None: plt.close() -def plot_detour_gain(enriched: list[dict[str, Any]], output: Path) -> None: +def _plot_detour_gain(enriched: Sequence[_CsvRow], output: Path) -> None: _ = plt.figure(figsize=(3.35, 2.35)) + for method, marker in [("weighted", "o"), ("pareto", "x")]: group = [ row for row in enriched - if row["method"] == method and row["profile"] != "neutral" + if _as_text(row, "method") == method + and 
_as_text(row, "profile") != "neutral" ] _ = plt.scatter( - [float(row["distance_overhead_pct"]) for row in group], - [float(row["max_score_gain_pp"]) for row in group], + [_as_float(row, "distance_overhead_pct") for row in group], + [_as_float(row, "max_score_gain_pp") for row in group], marker=marker, s=16, alpha=0.75, label=method, ) - _ = plt.axhline(0, linewidth=0.8) - _ = plt.axvline(0, linewidth=0.8) + + _ = plt.axhline(0.0, linewidth=0.8) + _ = plt.axvline(0.0, linewidth=0.8) _ = plt.xlabel("Distance overhead vs. shortest (%)") _ = plt.ylabel("Best score gain (pp)") _ = plt.legend(frameon=False) @@ -296,26 +416,28 @@ def plot_detour_gain(enriched: list[dict[str, Any]], output: Path) -> None: plt.close() -def plot_sensitivity(summary: list[dict[str, Any]], output: Path) -> None: - methods = [row["method"] for row in summary] - values = [float(row["mean_unique_top_routes"]) for row in summary] - plt.figure(figsize=(3.35, 2.20)) - plt.bar(methods, values) - plt.ylabel("Mean unique top routes") - plt.ylim(0, max(values + [1]) + 0.5) +def _plot_sensitivity(summary: Sequence[_CsvRow], output: Path) -> None: + methods = [_as_text(row, "method") for row in summary] + values = [_as_float(row, "mean_unique_top_routes") for row in summary] + _ = plt.figure(figsize=(3.35, 2.20)) + _ = plt.bar(methods, values) + _ = plt.ylabel("Mean unique top routes") + _ = plt.ylim(0.0, max([*values, 1.0]) + 0.5) plt.tight_layout() plt.savefig(output) plt.close() -def plot_pareto_labels(runs: list[dict[str, str]], output: Path) -> None: +def _plot_pareto_labels(runs: Sequence[_CsvInputRow], output: Path) -> None: labels = [ - as_float(row, "total_labels") / 1000.0 + _as_float(row, "total_labels") / 1000.0 for row in runs if row["method"] == "pareto" and row["total_labels"] ] + if not labels: return + _ = plt.figure(figsize=(3.35, 2.20)) _ = plt.hist(labels, bins=12) _ = plt.xlabel("Labels generated (thousands)") @@ -325,38 +447,38 @@ def plot_pareto_labels(runs: list[dict[str, str]], 
output: Path) -> None: plt.close() -def main() -> None: - args = parse_args() +def _main() -> None: + args = _parse_args() args.figures.mkdir(parents=True, exist_ok=True) - runs = read_csv(args.input / "runs.csv") - routes = read_csv(args.input / "routes.csv") - enriched = add_quality_fields(routes) + runs = _read_csv(args.input / "runs.csv") + routes = _read_csv(args.input / "routes.csv") + enriched = _add_quality_fields(routes) - run_summary = summarize_runs(runs) - quality_summary = summarize_quality(enriched, include_neutral=True) - quality_preference_summary = summarize_quality(enriched, include_neutral=False) - quality_by_profile = summarize_quality_by_profile(enriched) - sensitivity_summary = summarize_sensitivity(routes) + run_summary = _summarize_runs(runs) + quality_summary = _summarize_quality(enriched, include_neutral=True) + quality_preference_summary = _summarize_quality(enriched, include_neutral=False) + quality_by_profile = _summarize_quality_by_profile(enriched) + sensitivity_summary = _summarize_sensitivity(routes) - write_csv(args.input / "runtime_summary.csv", run_summary) - write_csv(args.input / "route_quality_summary.csv", quality_summary) - write_csv( + _write_csv(args.input / "runtime_summary.csv", run_summary) + _write_csv(args.input / "route_quality_summary.csv", quality_summary) + _write_csv( args.input / "route_quality_preference_summary.csv", quality_preference_summary ) - write_csv(args.input / "route_quality_by_profile.csv", quality_by_profile) - write_csv(args.input / "sensitivity_summary.csv", sensitivity_summary) - write_csv(args.input / "routes_enriched.csv", enriched) + _write_csv(args.input / "route_quality_by_profile.csv", quality_by_profile) + _write_csv(args.input / "sensitivity_summary.csv", sensitivity_summary) + _write_csv(args.input / "routes_enriched.csv", enriched) - plot_runtime(runs, args.figures / "evaluation_runtime_boxplot.pdf") - plot_detour_gain(enriched, args.figures / "evaluation_detour_gain_scatter.pdf") - 
plot_sensitivity( + _plot_runtime(runs, args.figures / "evaluation_runtime_boxplot.pdf") + _plot_detour_gain(enriched, args.figures / "evaluation_detour_gain_scatter.pdf") + _plot_sensitivity( sensitivity_summary, args.figures / "evaluation_weight_sensitivity.pdf" ) - plot_pareto_labels(runs, args.figures / "evaluation_pareto_labels_histogram.pdf") + _plot_pareto_labels(runs, args.figures / "evaluation_pareto_labels_histogram.pdf") print("Wrote summary CSV files and figures to", args.input, "and", args.figures) if __name__ == "__main__": - main() + _main() diff --git a/backend/scripts/run_evaluation.py b/backend/scripts/run_evaluation.py index 7495b5d..135d85b 100644 --- a/backend/scripts/run_evaluation.py +++ b/backend/scripts/run_evaluation.py @@ -3,8 +3,9 @@ Place this file in backend/scripts/run_evaluation.py and run it from the backend folder, for example: - uv run python scripts/run_evaluation.py --pairs 20 --seed 7 --travel-mode cycling \ - --min-distance 2000 --max-distance 8000 --pair-filter active + uv run python scripts/run_evaluation.py --pairs 20 --seed 7 \ + --travel-mode cycling --min-distance 2000 --max-distance 8000 \ + --pair-filter active The script loads the same graph state as the FastAPI app, samples origin-destination pairs from graph nodes, runs shortest, weighted, and Pareto @@ -15,7 +16,7 @@ browser caching, geocoding, or front-end rendering noise. 
""" -from __future__ import annotations +# ruff: noqa: T201 import argparse import csv @@ -25,10 +26,11 @@ import statistics import sys import time +from collections.abc import Callable, Iterable, Mapping, Sequence from dataclasses import dataclass from itertools import pairwise from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Literal, cast from unittest.mock import patch import networkx as nx @@ -43,6 +45,7 @@ build_weighted_edge_cost_function, compute_edge_cost_components, normalize_route_preference_weights, + select_parallel_edge_attributes, ) from app.graph_state import ( # noqa: E402 GRAPH_STATE, @@ -51,17 +54,29 @@ ) from app.main import OVERLAY_DIRECTORY, PLACE_NAME # noqa: E402 from app.models import ( # noqa: E402 + ParetoSearchLabel, RouteCoordinates, + RouteFeatureCollection, + RouteOptimizationMethod, RoutePlanningOptions, RoutePreferenceWeights, + TravelMode, ) +from app.value_parsing import parse_float_or_default # noqa: E402 if TYPE_CHECKING: - from collections.abc import Iterable + from app.typing_aliases import EdgeAttributeMap, MultiDiGraphAny + + +type _CsvValue = str | int | float | bool +type _CsvRow = dict[str, _CsvValue] +type _PairFilter = Literal["random", "active", "changed"] +type _RouteCoordinate = Sequence[float] +type _ShortestPathFunction = Callable[..., list[int]] @dataclass(frozen=True, slots=True) -class Pair: +class _Pair: pair_id: int origin_node: int destination_node: int @@ -79,23 +94,101 @@ class Pair: @dataclass(frozen=True, slots=True) -class WeightProfile: +class _WeightProfile: name: str scenic_weight: int snow_free_weight: int flat_weight: int -WEIGHT_PROFILES: tuple[WeightProfile, ...] 
= ( - WeightProfile("neutral", scenic_weight=0, snow_free_weight=0, flat_weight=0), - WeightProfile("scenic", scenic_weight=100, snow_free_weight=0, flat_weight=0), - WeightProfile("snow_free", scenic_weight=0, snow_free_weight=100, flat_weight=0), - WeightProfile("flat", scenic_weight=0, snow_free_weight=0, flat_weight=100), - WeightProfile("balanced", scenic_weight=50, snow_free_weight=50, flat_weight=50), +@dataclass(frozen=True, slots=True) +class _RunEvaluationArgs: + pairs: int + seed: int + travel_mode: TravelMode + min_distance: float + max_distance: float + max_sampling_attempts: int + pair_filter: _PairFilter + active_epsilon: float + pareto_max_routes: int + pareto_max_labels_per_node: int + pareto_max_total_labels: int + out: Path + profiles: str + + +@dataclass(frozen=True, slots=True) +class _SamplingOptions: + count: int + seed: int + min_distance: float + max_distance: float + max_attempts: int + pair_filter: _PairFilter + active_epsilon: float + + +@dataclass(frozen=True, slots=True) +class _RoutingOptions: + travel_mode: TravelMode + pareto_max_routes: int + pareto_max_labels_per_node: int + pareto_max_total_labels: int + + +WEIGHT_PROFILES: tuple[_WeightProfile, ...] 
= ( + _WeightProfile("neutral", scenic_weight=0, snow_free_weight=0, flat_weight=0), + _WeightProfile("scenic", scenic_weight=100, snow_free_weight=0, flat_weight=0), + _WeightProfile("snow_free", scenic_weight=0, snow_free_weight=100, flat_weight=0), + _WeightProfile("flat", scenic_weight=0, snow_free_weight=0, flat_weight=100), + _WeightProfile("balanced", scenic_weight=50, snow_free_weight=50, flat_weight=50), ) -def parse_args() -> argparse.Namespace: +def _parse_travel_mode(value: object) -> TravelMode: + if value in {"walking", "cycling"}: + return cast("TravelMode", value) + + error_message = f"Unsupported travel mode: {value}" + raise SystemExit(error_message) + + +def _parse_pair_filter(value: object) -> _PairFilter: + if value in {"random", "active", "changed"}: + return cast("_PairFilter", value) + + error_message = f"Unsupported pair filter: {value}" + raise SystemExit(error_message) + + +def _parse_path(value: object) -> Path: + if isinstance(value, Path): + return value + + return Path(str(value)) + + +def _parse_int(value: object) -> int: + if isinstance(value, int): + return value + + if isinstance(value, str): + return int(value) + + error_message = f"Expected integer argument, got {value!r}." + raise SystemExit(error_message) + + +def _parse_float(value: object) -> float: + if isinstance(value, int | float | str): + return float(value) + + error_message = f"Expected numeric argument, got {value!r}." 
+ raise SystemExit(error_message) + + +def _parse_args() -> _RunEvaluationArgs: parser = argparse.ArgumentParser() _ = parser.add_argument("--pairs", type=int, default=20) _ = parser.add_argument("--seed", type=int, default=7) @@ -127,60 +220,96 @@ def parse_args() -> argparse.Namespace: help="Comma-separated subset of: neutral,scenic,snow_free,flat,balanced", ) - return parser.parse_args() + values = cast("Mapping[str, object]", vars(parser.parse_args())) + + return _RunEvaluationArgs( + pairs=_parse_int(values["pairs"]), + seed=_parse_int(values["seed"]), + travel_mode=_parse_travel_mode(values["travel_mode"]), + min_distance=_parse_float(values["min_distance"]), + max_distance=_parse_float(values["max_distance"]), + max_sampling_attempts=_parse_int(values["max_sampling_attempts"]), + pair_filter=_parse_pair_filter(values["pair_filter"]), + active_epsilon=_parse_float(values["active_epsilon"]), + pareto_max_routes=_parse_int(values["pareto_max_routes"]), + pareto_max_labels_per_node=_parse_int(values["pareto_max_labels_per_node"]), + pareto_max_total_labels=_parse_int(values["pareto_max_total_labels"]), + out=_parse_path(values["out"]), + profiles=str(values["profiles"]), + ) -def load_profiles(profile_arg: str) -> list[WeightProfile]: +def _load_profiles(profile_arg: str) -> list[_WeightProfile]: names = {name.strip() for name in profile_arg.split(",") if name.strip()} profiles = [profile for profile in WEIGHT_PROFILES if profile.name in names] if not profiles: - raise SystemExit("No valid weight profiles selected.") + error_message = "No valid weight profiles selected." 
+ raise SystemExit(error_message) return profiles -def node_coordinates(graph: nx.MultiDiGraph, node_id: int) -> tuple[float, float]: - node = graph.nodes[node_id] +def _node_coordinates(graph: MultiDiGraphAny, node_id: int) -> tuple[float, float]: + node = cast("Mapping[str, object]", graph.nodes[node_id]) - return float(node["x"]), float(node["y"]) + return ( + parse_float_or_default(node.get("x"), default=0.0), + parse_float_or_default(node.get("y"), default=0.0), + ) -def percent_score(penalty: float, distance: float) -> float: +def _percent_score(penalty: float, distance: float) -> float: if distance <= 0: return 0.0 + return max(0.0, min(100.0, (1.0 - penalty / distance) * 100.0)) def _select_shortest_edge_attributes( - graph: nx.MultiDiGraph, u: int, v: int -) -> dict[str, Any]: - payload = graph.get_edge_data(u, v) - if not isinstance(payload, dict) or not payload: + graph: MultiDiGraphAny, source_node_id: int, target_node_id: int +) -> EdgeAttributeMap: + edge_attributes = select_parallel_edge_attributes( + graph, + source_node_id, + target_node_id, + ranking_key=lambda attrs: parse_float_or_default( + attrs.get("length"), + default=0.0, + ), + ) + + if edge_attributes is None: raise nx.NetworkXNoPath - return min(payload.values(), key=lambda attrs: float(attrs.get("length", 0.0))) + return edge_attributes -def path_cost_vector( - graph: nx.MultiDiGraph, node_path: list[int] + +def _path_cost_vector( + graph: MultiDiGraphAny, node_path: Sequence[int] ) -> tuple[float, float, float, float]: distance = snow = hills = scenic = 0.0 - for u, v in pairwise(node_path): - edge_attrs = _select_shortest_edge_attributes(graph, u, v) - d, s, h, c = compute_edge_cost_components(edge_attrs) - distance += d - snow += s - hills += h - scenic += c + for source_node_id, target_node_id in pairwise(node_path): + edge_attrs = _select_shortest_edge_attributes( + graph, source_node_id, target_node_id + ) + edge_distance, snow_penalty, hill_penalty, scenic_penalty = ( + 
compute_edge_cost_components(edge_attrs) + ) + distance += edge_distance + snow += snow_penalty + hills += hill_penalty + scenic += scenic_penalty return distance, snow, hills, scenic -def path_has_active_objective( +def _path_has_active_objective( cost_vector: tuple[float, float, float, float], *, epsilon: float ) -> bool: distance, snow_penalty, uphill_penalty, scenic_penalty = cost_vector + return ( snow_penalty > epsilon or uphill_penalty > epsilon @@ -188,19 +317,24 @@ def path_has_active_objective( ) -def weighted_path_signature( - graph: nx.MultiDiGraph, source: int, target: int, profile: WeightProfile +def _weighted_path_signature( + graph: MultiDiGraphAny, + source_node_id: int, + target_node_id: int, + profile: _WeightProfile, ) -> tuple[int, ...]: weights = RoutePreferenceWeights( scenic_weight=profile.scenic_weight, snow_free_weight=profile.snow_free_weight, flat_weight=profile.flat_weight, ) + shortest_path = cast("_ShortestPathFunction", nx.shortest_path) + return tuple( - nx.shortest_path( + shortest_path( graph, - source=source, - target=target, + source=source_node_id, + target=target_node_id, weight=build_weighted_edge_cost_function( normalize_route_preference_weights(weights) ), @@ -208,65 +342,70 @@ def weighted_path_signature( ) -def weighted_path_changes( - graph: nx.MultiDiGraph, source: int, target: int, profiles: list[WeightProfile] +def _weighted_path_changes( + graph: MultiDiGraphAny, + source_node_id: int, + target_node_id: int, + profiles: Sequence[_WeightProfile], ) -> bool: - neutral = weighted_path_signature(graph, source, target, WEIGHT_PROFILES[0]) + neutral = _weighted_path_signature( + graph, source_node_id, target_node_id, WEIGHT_PROFILES[0] + ) + return any( - weighted_path_signature(graph, source, target, profile) != neutral + _weighted_path_signature(graph, source_node_id, target_node_id, profile) + != neutral for profile in profiles if profile.name != "neutral" ) -def sample_pairs( - graph: nx.MultiDiGraph, - *, - count: 
int,
-    seed: int,
-    min_distance: float,
-    max_distance: float,
-    max_attempts: int,
-    pair_filter: str,
-    active_epsilon: float,
-    profiles: list[WeightProfile],
-) -> list[Pair]:
-    rng = random.Random(seed)
-    nodes = list(graph.nodes)
-    pairs: list[Pair] = []
+def _sample_pairs(
+    graph: MultiDiGraphAny,
+    options: _SamplingOptions,
+    profiles: Sequence[_WeightProfile],
+) -> list[_Pair]:
+    rng = random.Random(options.seed)  # noqa: S311 - deterministic benchmark sampling.
+    nodes = list(cast("Iterable[int]", graph.nodes))
+    pairs: list[_Pair] = []
     seen: set[tuple[int, int]] = set()
+    shortest_path = cast("_ShortestPathFunction", nx.shortest_path)
 
-    for _attempt in range(max_attempts):
-        if len(pairs) >= count:
+    for _attempt in range(options.max_attempts):
+        if len(pairs) >= options.count:
             break
         origin_node, destination_node = rng.sample(nodes, 2)
         if (origin_node, destination_node) in seen:
             continue
+
         seen.add((origin_node, destination_node))
         try:
-            shortest_path = nx.shortest_path(
+            path = shortest_path(
                 graph,
                 source=origin_node,
                 target=destination_node,
                 weight="length",
             )
-            cost_vector = path_cost_vector(graph, shortest_path)
+            cost_vector = _path_cost_vector(graph, path)
         except (nx.NetworkXNoPath, nx.NodeNotFound):
             continue
         distance, snow_penalty, uphill_penalty, scenic_penalty = cost_vector
-        if not (min_distance <= distance <= max_distance):
+        if not (options.min_distance <= distance <= options.max_distance):
             continue
-        if pair_filter in {"active", "changed"} and not path_has_active_objective(
+        if options.pair_filter in {
+            "active",
+            "changed",
+        } and not _path_has_active_objective(
             cost_vector,
-            epsilon=active_epsilon,
+            epsilon=options.active_epsilon,
         ):
             continue
-        if pair_filter == "changed" and not weighted_path_changes(
+        if options.pair_filter == "changed" and not _weighted_path_changes(
             graph,
             origin_node,
             destination_node,
@@ -274,10 +413,10 @@ def sample_pairs(
         ):
             continue
-        origin_lon, origin_lat = node_coordinates(graph, origin_node)
-        
destination_lon, destination_lat = node_coordinates(graph, destination_node) + origin_lon, origin_lat = _node_coordinates(graph, origin_node) + destination_lon, destination_lat = _node_coordinates(graph, destination_node) pairs.append( - Pair( + _Pair( pair_id=len(pairs), origin_node=origin_node, destination_node=destination_node, @@ -289,78 +428,77 @@ def sample_pairs( shortest_snow_penalty=snow_penalty, shortest_uphill_penalty=uphill_penalty, shortest_scenic_penalty=scenic_penalty, - shortest_snow_free_score=percent_score(snow_penalty, distance), - shortest_flat_score=percent_score(uphill_penalty, distance), - shortest_scenic_score=percent_score(scenic_penalty, distance), + shortest_snow_free_score=_percent_score(snow_penalty, distance), + shortest_flat_score=_percent_score(uphill_penalty, distance), + shortest_scenic_score=_percent_score(scenic_penalty, distance), ) ) - if len(pairs) < count: - raise SystemExit( - f"Only sampled {len(pairs)} valid pairs after {max_attempts} attempts. " - "Try widening --min-distance/--max-distance, increasing " + if len(pairs) < options.count: + count = len(pairs) + attempts = options.max_attempts + sampled = f"Only sampled {count} valid pairs after {attempts} attempts." + advice = "Try widening --min-distance/--max-distance, increasing" + command_hint = ( "--max-sampling-attempts, lowering --pairs, or using --pair-filter random." 
) + error_message = f"{sampled} {advice} {command_hint}" + raise SystemExit(error_message) return pairs -def route_signature(coordinates: Iterable[Any]) -> str: - rounded = [] +def _route_signature(coordinates: Iterable[_RouteCoordinate]) -> str: + rounded: list[tuple[float, float]] = [] + for coordinate in coordinates: lon = float(coordinate[0]) lat = float(coordinate[1]) rounded.append((round(lon, 6), round(lat, 6))) + payload = json.dumps(rounded, separators=(",", ":")) - return hashlib.sha1(payload.encode("utf-8")).hexdigest()[:12] + + return hashlib.sha256(payload.encode("utf-8")).hexdigest()[:12] -def route_options( +def _route_options( *, - method: str, - profile: WeightProfile, - pareto_max_routes: int, - pareto_max_labels_per_node: int, - pareto_max_total_labels: int, + method: RouteOptimizationMethod, + profile: _WeightProfile, + routing_options: _RoutingOptions, ) -> RoutePlanningOptions: return RoutePlanningOptions( - route_optimization_method=method, # type: ignore[arg-type] + route_optimization_method=method, preference_weights=RoutePreferenceWeights( scenic_weight=profile.scenic_weight, snow_free_weight=profile.snow_free_weight, flat_weight=profile.flat_weight, ), - pareto_max_routes=pareto_max_routes, - pareto_max_labels_per_node=pareto_max_labels_per_node, - pareto_max_total_labels=pareto_max_total_labels, + pareto_max_routes=routing_options.pareto_max_routes, + pareto_max_labels_per_node=routing_options.pareto_max_labels_per_node, + pareto_max_total_labels=routing_options.pareto_max_total_labels, ) -def run_one_request( - *, - pair: Pair, - travel_mode: str, - method: str, - profile: WeightProfile, - pareto_max_routes: int, - pareto_max_labels_per_node: int, - pareto_max_total_labels: int, -) -> tuple[dict[str, Any], list[dict[str, Any]]]: +def _run_one_request( + pair: _Pair, + method: RouteOptimizationMethod, + profile: _WeightProfile, + routing_options: _RoutingOptions, +) -> tuple[_CsvRow, list[_CsvRow]]: coordinates = RouteCoordinates( 
origin_longitude=pair.origin_longitude, origin_latitude=pair.origin_latitude, destination_longitude=pair.destination_longitude, destination_latitude=pair.destination_latitude, ) - options = route_options( + options = _route_options( method=method, profile=profile, - pareto_max_routes=pareto_max_routes, - pareto_max_labels_per_node=pareto_max_labels_per_node, - pareto_max_total_labels=pareto_max_total_labels, + routing_options=routing_options, ) - pareto_stats: dict[str, Any] = { + pareto_stats: _CsvRow = { "total_labels": "", "destination_labels": "", "hit_total_label_cap": "", @@ -368,11 +506,27 @@ def run_one_request( original_pareto_search = route_planner_module.run_pareto_label_search - def instrumented_pareto_search(*args: Any, **kwargs: Any): - labels, destination_label_ids = original_pareto_search(*args, **kwargs) + def instrumented_pareto_search( + graph: MultiDiGraphAny, + origin_node_id: int, + destination_node_id: int, + *, + max_labels_per_node: int, + max_total_labels: int, + ) -> tuple[list[ParetoSearchLabel], list[int]]: + labels, destination_label_ids = original_pareto_search( + graph, + origin_node_id, + destination_node_id, + max_labels_per_node=max_labels_per_node, + max_total_labels=max_total_labels, + ) pareto_stats["total_labels"] = len(labels) pareto_stats["destination_labels"] = len(destination_label_ids) - pareto_stats["hit_total_label_cap"] = len(labels) >= pareto_max_total_labels + pareto_stats["hit_total_label_cap"] = ( + len(labels) >= routing_options.pareto_max_total_labels + ) + return labels, destination_label_ids start = time.perf_counter() @@ -382,17 +536,19 @@ def instrumented_pareto_search(*args: Any, **kwargs: Any): "app.route_planner.run_pareto_label_search", side_effect=instrumented_pareto_search, ): - response = route_planner_module.build_route_feature_collection( - graph_state=GRAPH_STATE, - route_coordinates=coordinates, - travel_mode=travel_mode, # type: ignore[arg-type] - route_options=options, + response: 
RouteFeatureCollection | None = ( + route_planner_module.build_route_feature_collection( + graph_state=GRAPH_STATE, + route_coordinates=coordinates, + travel_mode=routing_options.travel_mode, + route_options=options, + ) ) else: response = route_planner_module.build_route_feature_collection( graph_state=GRAPH_STATE, route_coordinates=coordinates, - travel_mode=travel_mode, # type: ignore[arg-type] + travel_mode=routing_options.travel_mode, route_options=options, ) success = True @@ -404,9 +560,9 @@ def instrumented_pareto_search(*args: Any, **kwargs: Any): runtime_ms = (time.perf_counter() - start) * 1000.0 - run_row = { + run_row: _CsvRow = { "pair_id": pair.pair_id, - "travel_mode": travel_mode, + "travel_mode": routing_options.travel_mode, "method": method, "profile": profile.name, "scenic_weight": profile.scenic_weight, @@ -422,12 +578,13 @@ def instrumented_pareto_search(*args: Any, **kwargs: Any): **pareto_stats, } - route_rows: list[dict[str, Any]] = [] + route_rows: list[_CsvRow] = [] if response is not None: for feature in response.features: breakdown = feature.properties.penalty_breakdown if breakdown is None: continue + distance = float(breakdown.distance) snow_penalty = float(breakdown.snow_penalty) uphill_penalty = float(breakdown.uphill_penalty) @@ -435,7 +592,7 @@ def instrumented_pareto_search(*args: Any, **kwargs: Any): route_rows.append( { "pair_id": pair.pair_id, - "travel_mode": travel_mode, + "travel_mode": routing_options.travel_mode, "method": method, "profile": profile.name, "route_index": feature.properties.route_index, @@ -446,19 +603,23 @@ def instrumented_pareto_search(*args: Any, **kwargs: Any): "snow_penalty": snow_penalty, "uphill_penalty": uphill_penalty, "scenic_penalty": scenic_penalty, - "snow_free_score": percent_score(snow_penalty, distance), - "flat_score": percent_score(uphill_penalty, distance), - "scenic_score": percent_score(scenic_penalty, distance), - "signature": route_signature(feature.geometry.coordinates), + 
"snow_free_score": _percent_score(snow_penalty, distance), + "flat_score": _percent_score(uphill_penalty, distance), + "scenic_score": _percent_score(scenic_penalty, distance), + "signature": _route_signature( + cast("Iterable[_RouteCoordinate]", feature.geometry.coordinates) + ), } ) return run_row, route_rows -def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: +def _write_csv(path: Path, rows: Sequence[_CsvRow]) -> None: if not rows: - raise SystemExit(f"No rows to write for {path}") + error_message = f"No rows to write for {path}" + raise SystemExit(error_message) + path.parent.mkdir(parents=True, exist_ok=True) with path.open("w", newline="", encoding="utf-8") as file: writer = csv.DictWriter(file, fieldnames=list(rows[0].keys())) @@ -466,9 +627,13 @@ def write_csv(path: Path, rows: list[dict[str, Any]]) -> None: writer.writerows(rows) -def pair_rows( - pairs: list[Pair], *, pair_filter: str, min_distance: float, max_distance: float -) -> list[dict[str, Any]]: +def _pair_rows( + pairs: Sequence[_Pair], + *, + pair_filter: _PairFilter, + min_distance: float, + max_distance: float, +) -> list[_CsvRow]: return [ { "pair_id": pair.pair_id, @@ -493,9 +658,24 @@ def pair_rows( ] -def main() -> None: - args = parse_args() - profiles = load_profiles(args.profiles) +def _main() -> None: + args = _parse_args() + profiles = _load_profiles(args.profiles) + sampling_options = _SamplingOptions( + count=args.pairs, + seed=args.seed, + min_distance=args.min_distance, + max_distance=args.max_distance, + max_attempts=args.max_sampling_attempts, + pair_filter=args.pair_filter, + active_epsilon=args.active_epsilon, + ) + routing_options = _RoutingOptions( + travel_mode=args.travel_mode, + pareto_max_routes=args.pareto_max_routes, + pareto_max_labels_per_node=args.pareto_max_labels_per_node, + pareto_max_total_labels=args.pareto_max_total_labels, + ) print("Loading graphs and overlays ...", flush=True) load_graph_state( @@ -506,27 +686,20 @@ def main() -> None: graph = 
get_graph_for_travel_mode(GRAPH_STATE, args.travel_mode) print("Sampling origin-destination pairs ...", flush=True) - pairs = sample_pairs( - graph, - count=args.pairs, - seed=args.seed, - min_distance=args.min_distance, - max_distance=args.max_distance, - max_attempts=args.max_sampling_attempts, - pair_filter=args.pair_filter, - active_epsilon=args.active_epsilon, - profiles=profiles, - ) - print( - f"Sampled {len(pairs)} pairs; shortest-distance median " - f"{statistics.median(pair.shortest_distance_m for pair in pairs):.0f} m.", - flush=True, + pairs = _sample_pairs(graph, sampling_options, profiles) + median_distance = statistics.median(pair.shortest_distance_m for pair in pairs) + sampled_message = " ".join( + ( + f"Sampled {len(pairs)} pairs;", + f"shortest-distance median {median_distance:.0f} m.", + ) ) + print(sampled_message, flush=True) - run_rows: list[dict[str, Any]] = [] - route_rows: list[dict[str, Any]] = [] + run_rows: list[_CsvRow] = [] + route_rows: list[_CsvRow] = [] - requests: list[tuple[Pair, str, WeightProfile]] = [] + requests: list[tuple[_Pair, RouteOptimizationMethod, _WeightProfile]] = [] for pair in pairs: requests.append((pair, "shortest", WEIGHT_PROFILES[0])) for profile in profiles: @@ -535,40 +708,42 @@ def main() -> None: print(f"Running {len(requests)} route requests ...", flush=True) for index, (pair, method, profile) in enumerate(requests, start=1): - print( - f"[{index}/{len(requests)}] pair={pair.pair_id} method={method} profile={profile.name}", - flush=True, + progress = " ".join( + ( + f"[{index}/{len(requests)}]", + f"pair={pair.pair_id}", + f"method={method}", + f"profile={profile.name}", + ) ) - run_row, rows_for_request = run_one_request( - pair=pair, - travel_mode=args.travel_mode, - method=method, - profile=profile, - pareto_max_routes=args.pareto_max_routes, - pareto_max_labels_per_node=args.pareto_max_labels_per_node, - pareto_max_total_labels=args.pareto_max_total_labels, + print(progress, flush=True) + run_row, 
rows_for_request = _run_one_request( + pair, + method, + profile, + routing_options, ) run_rows.append(run_row) route_rows.extend(rows_for_request) args.out.mkdir(parents=True, exist_ok=True) - write_csv( + _write_csv( args.out / "pairs.csv", - pair_rows( + _pair_rows( pairs, pair_filter=args.pair_filter, min_distance=args.min_distance, max_distance=args.max_distance, ), ) - write_csv(args.out / "runs.csv", run_rows) - write_csv(args.out / "routes.csv", route_rows) + _write_csv(args.out / "runs.csv", run_rows) + _write_csv(args.out / "routes.csv", route_rows) - failure_count = sum(1 for row in run_rows if not row["success"]) + failure_count = sum(1 for row in run_rows if row["success"] is False) print(f"Done. Wrote CSV files to {args.out}.") if failure_count: print(f"Warning: {failure_count} requests failed. Inspect runs.csv.") if __name__ == "__main__": - main() + _main()