From a7635c81625d8e897aa62c70612a9a5e75f48daa Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 00:56:35 +1100 Subject: [PATCH 01/32] cya later yucky stuff. Tests are still a bit poo though. Signed-off-by: Max Chesterfield --- README.md | 8 + pyproject.toml | 9 + src/zepben/eas/__init__.py | 5 - src/zepben/eas/client/eas_client.py | 1286 ++---------- src/zepben/eas/client/enums.py | 16 + .../eas/client/feeder_load_analysis_input.py | 54 - src/zepben/eas/client/fla_forecast_config.py | 31 - src/zepben/eas/client/hc_commons.py | 58 - src/zepben/eas/client/ingestor.py | 72 - src/zepben/eas/client/opendss.py | 60 - .../eas/client/patched_generated_client.py | 38 + src/zepben/eas/client/study.py | 56 - src/zepben/eas/client/util.py | 13 - src/zepben/eas/client/work_package.py | 995 --------- src/zepben/eas/lib/__init__.py | 5 + .../lib/generated_graphql_client/__init__.py | 222 ++ .../async_base_client.py | 391 ++++ .../generated_graphql_client/base_model.py | 30 + .../base_operation.py | 156 ++ .../lib/generated_graphql_client/client.py | 109 + .../generated_graphql_client/custom_fields.py | 1854 +++++++++++++++++ .../custom_mutations.py | 491 +++++ .../custom_queries.py | 855 ++++++++ .../custom_typing_fields.py | 429 ++++ .../eas/lib/generated_graphql_client/enums.py | 225 ++ .../generated_graphql_client/exceptions.py | 85 + .../generated_graphql_client/input_types.py | 775 +++++++ test/test_eas_client.py | 955 ++++----- test/test_feeder_load_analysis_input.py | 11 +- 29 files changed, 6277 insertions(+), 3017 deletions(-) create mode 100644 src/zepben/eas/client/enums.py delete mode 100644 src/zepben/eas/client/feeder_load_analysis_input.py delete mode 100644 src/zepben/eas/client/fla_forecast_config.py delete mode 100644 src/zepben/eas/client/hc_commons.py delete mode 100644 src/zepben/eas/client/ingestor.py delete mode 100644 src/zepben/eas/client/opendss.py create mode 100644 src/zepben/eas/client/patched_generated_client.py delete mode 100644 
src/zepben/eas/client/study.py delete mode 100644 src/zepben/eas/client/util.py delete mode 100644 src/zepben/eas/client/work_package.py create mode 100644 src/zepben/eas/lib/__init__.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/__init__.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/async_base_client.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/base_model.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/base_operation.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/client.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_fields.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_mutations.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_queries.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/enums.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/exceptions.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/input_types.py diff --git a/README.md b/README.md index aaea292..4af07ab 100644 --- a/README.md +++ b/README.md @@ -117,4 +117,12 @@ async def upload(): ) await eas_client.aclose() +``` + +# Development # + +To regenerate the graphql client you will need to install `zepben.eas` with `eas-codegen` optional dependencies, then run: + +```shell +ariadne-codegen ``` \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5e9f952..f71572e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,15 @@ test = [ "pytest-httpserver==1.0.8", "trustme==0.9.0" ] +eas-codegen = [ + "ariadne-codegen" +] [tool.setuptools.packages.find] where = ["src/"] + +[tool.ariadne-codegen] +remote_schema_url = "http://127.0.0.1:7654/api/graphql" +enable_custom_operations=true +target_package_path='src/zepben/eas/lib' +target_package_name='generated_graphql_client' \ 
No newline at end of file diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index 3e6d392..b4ff019 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -6,8 +6,3 @@ # from zepben.eas.client.eas_client import * -from zepben.eas.client.feeder_load_analysis_input import * -from zepben.eas.client.fla_forecast_config import * -from zepben.eas.client.opendss import * -from zepben.eas.client.study import * -from zepben.eas.client.work_package import * diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 3446ebe..efed17a 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -1,32 +1,27 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2025 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. import ssl -import warnings from asyncio import get_event_loop -from dataclasses import asdict from datetime import datetime -from hashlib import sha256 from http import HTTPStatus -from json import dumps -from typing import Optional, List -import aiohttp +import httpx from aiohttp import ClientSession -from urllib3.exceptions import InsecureRequestWarning -from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput -from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput -from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput -from zepben.eas.client.study import Study -from zepben.eas.client.util import construct_url -from zepben.eas.client.work_package import WorkPackageConfig, FixedTime, TimePeriod, ForecastConfig, FeederConfigs, \ - GeneratorConfig, ModelConfig __all__ = ["EasClient"] +from zepben.eas.client.patched_generated_client import 
PatchedClient as Client + +from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ + IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, \ + HcModelConfigInput, OpenDssModelInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput +from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation +from zepben.eas.lib.generated_graphql_client.custom_queries import Query + class EasClient: """ @@ -38,9 +33,9 @@ def __init__( host: str, port: int, protocol: str = "https", - access_token: Optional[str] = None, + access_token: str | None = None, verify_certificate: bool = True, - ca_filename: Optional[str] = None, + ca_filename: str | None = None, session: ClientSession = None, json_serialiser=None ): @@ -69,364 +64,58 @@ def __init__( self._protocol = protocol self._host = host self._port = port - self._verify_certificate = verify_certificate - self._ca_filename = ca_filename - self._access_token = access_token - - if session is None: - conn = aiohttp.TCPConnector(limit=200, limit_per_host=0) - timeout = aiohttp.ClientTimeout(total=60) - self.session = aiohttp.ClientSession(json_serialize=json_serialiser or dumps, connector=conn, timeout=timeout) - else: - self.session = session - - def close(self): - return get_event_loop().run_until_complete(self.aclose()) + self._base_url = f"{protocol}://{host}:{port}" - async def aclose(self): - await self.session.close() + verify = False + if verify_certificate: + try: + verify = ssl.create_default_context(cafile=ca_filename) + except ssl.SSLError: + verify = ssl.create_default_context(capath=ca_filename) - def _get_request_headers(self, content_type: str = "application/json") -> dict: - headers = {"content-type": content_type} - if self._access_token: - headers["authorization"] = f"Bearer {self._access_token}" - return headers + http_client = httpx.AsyncClient( + headers=dict(authorization=f"Bearer 
{access_token}") if access_token else None, + verify=verify, + ) + self._gql_client = Client( + f"{self._base_url}/api/graphql", + http_client=http_client, + ) - def generator_config_to_json(self, generator_config: Optional[GeneratorConfig]) -> Optional[dict]: - return generator_config and { - "model": generator_config.model and { - "vmPu": generator_config.model.vm_pu, - "loadVMinPu": generator_config.model.load_vmin_pu, - "loadVMaxPu": generator_config.model.load_vmax_pu, - "genVMinPu": generator_config.model.gen_vmin_pu, - "genVMaxPu": generator_config.model.gen_vmax_pu, - "loadModel": generator_config.model.load_model, - "collapseSWER": generator_config.model.collapse_swer, - "calibration": generator_config.model.calibration, - "pFactorBaseExports": generator_config.model.p_factor_base_exports, - "pFactorForecastPv": generator_config.model.p_factor_forecast_pv, - "pFactorBaseImports": generator_config.model.p_factor_base_imports, - "fixSinglePhaseLoads": generator_config.model.fix_single_phase_loads, - "maxSinglePhaseLoad": generator_config.model.max_single_phase_load, - "fixOverloadingConsumers": generator_config.model.fix_overloading_consumers, - "maxLoadTxRatio": generator_config.model.max_load_tx_ratio, - "maxGenTxRatio": generator_config.model.max_gen_tx_ratio, - "fixUndersizedServiceLines": generator_config.model.fix_undersized_service_lines, - "maxLoadServiceLineRatio": generator_config.model.max_load_service_line_ratio, - "maxLoadLvLineRatio": generator_config.model.max_load_lv_line_ratio, - "simplifyNetwork": generator_config.model.simplify_network, - "collapseLvNetworks": generator_config.model.collapse_lv_networks, - "collapseNegligibleImpedances": generator_config.model.collapse_negligible_impedances, - "combineCommonImpedances": generator_config.model.combine_common_impedances, - "feederScenarioAllocationStrategy": generator_config.model.feeder_scenario_allocation_strategy and generator_config.model.feeder_scenario_allocation_strategy.name, - 
"closedLoopVRegEnabled": generator_config.model.closed_loop_v_reg_enabled, - "closedLoopVRegReplaceAll": generator_config.model.closed_loop_v_reg_replace_all, - "closedLoopVRegSetPoint": generator_config.model.closed_loop_v_reg_set_point, - "closedLoopVBand": generator_config.model.closed_loop_v_band, - "closedLoopTimeDelay": generator_config.model.closed_loop_time_delay, - "closedLoopVLimit": generator_config.model.closed_loop_v_limit, - "defaultTapChangerTimeDelay": generator_config.model.default_tap_changer_time_delay, - "defaultTapChangerSetPointPu": generator_config.model.default_tap_changer_set_point_pu, - "defaultTapChangerBand": generator_config.model.default_tap_changer_band, - "splitPhaseDefaultLoadLossPercentage": generator_config.model.split_phase_default_load_loss_percentage, - "splitPhaseLVKV": generator_config.model.split_phase_lv_kv, - "swerVoltageToLineVoltage": generator_config.model.swer_voltage_to_line_voltage, - "loadPlacement": generator_config.model.load_placement and generator_config.model.load_placement.name, - "loadIntervalLengthHours": generator_config.model.load_interval_length_hours, - "meterPlacementConfig": generator_config.model.meter_placement_config and { - "feederHead": generator_config.model.meter_placement_config.feeder_head, - "distTransformers": generator_config.model.meter_placement_config.dist_transformers, - "switchMeterPlacementConfigs": generator_config.model.meter_placement_config.switch_meter_placement_configs and [ - { - "meterSwitchClass": spc.meter_switch_class and spc.meter_switch_class.name, - "namePattern": spc.name_pattern - } for spc in - generator_config.model.meter_placement_config.switch_meter_placement_configs - ], - "energyConsumerMeterGroup": generator_config.model.meter_placement_config.energy_consumer_meter_group - }, - "seed": generator_config.model.seed, - "defaultLoadWatts": generator_config.model.default_load_watts, - "defaultGenWatts": generator_config.model.default_gen_watts, - "defaultLoadVar": 
generator_config.model.default_load_var, - "defaultGenVar": generator_config.model.default_gen_var, - "transformerTapSettings": generator_config.model.transformer_tap_settings, - "ctPrimScalingFactor": generator_config.model.ct_prim_scaling_factor, - "useSpanLevelThreshold": generator_config.model.use_span_level_threshold, - "ratingThreshold": generator_config.model.rating_threshold, - "simplifyPLSIThreshold": generator_config.model.simplify_plsi_threshold, - "emergAmpScaling": generator_config.model.emerg_amp_scaling, - "inverterControlConfig": generator_config.model.inverter_control_config and { - "cutOffDate": generator_config.model.inverter_control_config.cut_off_date and generator_config.model.inverter_control_config.cut_off_date.isoformat(), - "beforeCutOffProfile": generator_config.model.inverter_control_config.before_cut_off_profile, - "afterCutOffProfile": generator_config.model.inverter_control_config.after_cut_off_profile - } - }, - "solve": generator_config.solve and { - "normVMinPu": generator_config.solve.norm_vmin_pu, - "normVMaxPu": generator_config.solve.norm_vmax_pu, - "emergVMinPu": generator_config.solve.emerg_vmin_pu, - "emergVMaxPu": generator_config.solve.emerg_vmax_pu, - "baseFrequency": generator_config.solve.base_frequency, - "voltageBases": generator_config.solve.voltage_bases, - "maxIter": generator_config.solve.max_iter, - "maxControlIter": generator_config.solve.max_control_iter, - "mode": generator_config.solve.mode and generator_config.solve.mode.name, - "stepSizeMinutes": generator_config.solve.step_size_minutes - }, - "rawResults": generator_config.raw_results and { - "energyMeterVoltagesRaw": generator_config.raw_results.energy_meter_voltages_raw, - "energyMetersRaw": generator_config.raw_results.energy_meters_raw, - "resultsPerMeter": generator_config.raw_results.results_per_meter, - "overloadsRaw": generator_config.raw_results.overloads_raw, - "voltageExceptionsRaw": generator_config.raw_results.voltage_exceptions_raw - }, - 
"nodeLevelResults": generator_config.node_level_results and { - "collectVoltage": generator_config.node_level_results.collect_voltage, - "collectCurrent": generator_config.node_level_results.collect_current, - "collectPower": generator_config.node_level_results.collect_power, - "mridsToCollect": generator_config.node_level_results.mrids_to_collect, - "collectAllSwitches": generator_config.node_level_results.collect_all_switches, - "collectAllTransformers": generator_config.node_level_results.collect_all_transformers, - "collectAllConductors": generator_config.node_level_results.collect_all_conductors, - "collectAllEnergyConsumers": generator_config.node_level_results.collect_all_energy_consumers, - } - } - def work_package_config_to_json(self, work_package: WorkPackageConfig) -> dict: - return { - "feederConfigs": { - "configs": [ - { - "feeder": config.feeder, - "years": config.years, - "scenarios": config.scenarios, - "timePeriod": { - "startTime": config.load_time.start_time.isoformat(), - "endTime": config.load_time.end_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - } if isinstance(config.load_time, TimePeriod) else None, - "fixedTime": config.load_time and { - "loadTime": config.load_time.load_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - } if isinstance(config.load_time, FixedTime) else None, - } for config in work_package.syf_config.configs - ] - } if isinstance(work_package.syf_config, FeederConfigs) else None, - "forecastConfig": { - 
"feeders": work_package.syf_config.feeders, - "years": work_package.syf_config.years, - "scenarios": work_package.syf_config.scenarios, - "timePeriod": { - "startTime": work_package.syf_config.load_time.start_time.isoformat(), - "endTime": work_package.syf_config.load_time.end_time.isoformat(), - "overrides": work_package.syf_config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in work_package.syf_config.load_time.load_overrides.items() - ] - } if isinstance(work_package.syf_config.load_time, TimePeriod) else None, - "fixedTime": work_package.syf_config.load_time and { - "loadTime": work_package.syf_config.load_time.load_time.isoformat(), - "overrides": work_package.syf_config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in work_package.syf_config.load_time.load_overrides.items() - ] - } if isinstance(work_package.syf_config.load_time, FixedTime) else None - } if isinstance(work_package.syf_config, ForecastConfig) else None, - "qualityAssuranceProcessing": work_package.quality_assurance_processing, - "generatorConfig": self.generator_config_to_json(work_package.generator_config), - "executorConfig": {}, - "resultProcessorConfig": work_package.result_processor_config and { - "storedResults": work_package.result_processor_config.stored_results and { - "energyMeterVoltagesRaw": work_package.result_processor_config.stored_results.energy_meter_voltages_raw, - "energyMetersRaw": work_package.result_processor_config.stored_results.energy_meters_raw, - "overloadsRaw": work_package.result_processor_config.stored_results.overloads_raw, - "voltageExceptionsRaw": 
work_package.result_processor_config.stored_results.voltage_exceptions_raw, - }, - "metrics": work_package.result_processor_config.metrics and { - "calculatePerformanceMetrics": work_package.result_processor_config.metrics.calculate_performance_metrics - }, - "writerConfig": work_package.result_processor_config.writer_config and { - "writerType": work_package.result_processor_config.writer_config.writer_type and work_package.result_processor_config.writer_config.writer_type.name, - "outputWriterConfig": work_package.result_processor_config.writer_config.output_writer_config and { - "enhancedMetricsConfig": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config and { - "populateEnhancedMetrics": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_enhanced_metrics, - "populateEnhancedMetricsProfile": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_enhanced_metrics_profile, - "populateDurationCurves": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_duration_curves, - "populateConstraints": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_constraints, - "populateWeeklyReports": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_weekly_reports, - "calculateNormalForLoadThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_normal_for_load_thermal, - "calculateEmergForLoadThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_emerg_for_load_thermal, - "calculateNormalForGenThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_normal_for_gen_thermal, - 
"calculateEmergForGenThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_emerg_for_gen_thermal, - "calculateCO2": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_co2 - } - } - } - }, - "intervention": work_package.intervention and ( - { - "baseWorkPackageId": work_package.intervention.base_work_package_id, - "interventionType": work_package.intervention.intervention_type.name, - "candidateGeneration": work_package.intervention.candidate_generation and { - "type": work_package.intervention.candidate_generation.type.name, - "interventionCriteriaName": work_package.intervention.candidate_generation.intervention_criteria_name, - "averageVoltageSpreadThreshold": work_package.intervention.candidate_generation.average_voltage_spread_threshold, - "voltageUnderLimitHoursThreshold": work_package.intervention.candidate_generation.voltage_under_limit_hours_threshold, - "voltageOverLimitHoursThreshold": work_package.intervention.candidate_generation.voltage_over_limit_hours_threshold, - "tapWeightingFactorLowerThreshold": work_package.intervention.candidate_generation.tap_weighting_factor_lower_threshold, - "tapWeightingFactorUpperThreshold": work_package.intervention.candidate_generation.tap_weighting_factor_upper_threshold - }, - "allocationCriteria": work_package.intervention.allocation_criteria, - "specificAllocationInstance": work_package.intervention.specific_allocation_instance, - "phaseRebalanceProportions": work_package.intervention.phase_rebalance_proportions and { - "a": work_package.intervention.phase_rebalance_proportions.a, - "b": work_package.intervention.phase_rebalance_proportions.b, - "c": work_package.intervention.phase_rebalance_proportions.c - }, - "dvms": work_package.intervention.dvms and { - "lowerLimit": work_package.intervention.dvms.lower_limit, - "upperLimit": work_package.intervention.dvms.upper_limit, - "lowerPercentile": 
work_package.intervention.dvms.lower_percentile, - "upperPercentile": work_package.intervention.dvms.upper_percentile, - "maxIterations": work_package.intervention.dvms.max_iterations, - "regulatorConfig": { - "puTarget": work_package.intervention.dvms.regulator_config.pu_target, - "puDeadbandPercent": work_package.intervention.dvms.regulator_config.pu_deadband_percent, - "maxTapChangePerStep": work_package.intervention.dvms.regulator_config.max_tap_change_per_step, - "allowPushToLimit": work_package.intervention.dvms.regulator_config.allow_push_to_limit - } - } - } | - ( - {"allocationLimitPerYear": work_package.intervention.allocation_limit_per_year} - if work_package.intervention.allocation_limit_per_year is not None else {} - ) | - ( - { - "yearRange": { - "maxYear": work_package.intervention.year_range.max_year, - "minYear": work_package.intervention.year_range.min_year - } - } - if work_package.intervention.year_range is not None else {} - ) - ) - } - - def run_hosting_capacity_work_package(self, work_package: WorkPackageConfig): - """ - Send request to hosting capacity service to run work package + def close(self): + return get_event_loop().run_until_complete(self.aclose()) - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run - :return: The HTTP response received from the Evolve App Server after attempting to run work package - """ - return get_event_loop().run_until_complete(self.async_run_hosting_capacity_work_package(work_package)) + async def aclose(self): # FIXME: __axeit__ ? 
+ return - def get_work_package_cost_estimation(self, work_package: WorkPackageConfig): + def get_work_package_cost_estimation(self, work_package: WorkPackageInput): """ Send request to hosting capacity service to get an estimate cost of supplied work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return get_event_loop().run_until_complete(self.async_get_work_package_cost_estimation(work_package)) - - async def async_get_work_package_cost_estimation(self, work_package: WorkPackageConfig): - """ - Send asynchronous request to hosting capacity service to get an estimate cost of supplied work package - - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run - :return: The HTTP response received from the Evolve App Server after attempting to run work package - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getWorkPackageCostEstimation($input: WorkPackageInput!) 
{ - getWorkPackageCostEstimation(input: $input) - } - """, - "variables": { - "workPackageName": work_package.name, - "input": self.work_package_config_to_json(work_package) - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.query( + Query.get_work_package_cost_estimation(work_package), + operation_name="getWorkPackageCostEstimation", + ) + ) - async def async_run_hosting_capacity_work_package(self, work_package: WorkPackageConfig): + def run_hosting_capacity_work_package(self, work_package: WorkPackageInput, work_package_name: str): """ - Send asynchronous request to hosting capacity service to run work package + Send request to hosting capacity service to run work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation runWorkPackage($input: WorkPackageInput!, $workPackageName: String!) 
{ - runWorkPackage(input: $input, workPackageName: $workPackageName) - } - """, - "variables": { - "workPackageName": work_package.name, - "input": self.work_package_config_to_json(work_package) - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.mutation( + Mutation.run_work_package(work_package, work_package_name=work_package_name), + operation_name="runWorkPackage", + ) + ) def cancel_hosting_capacity_work_package(self, work_package_id: str): """ @@ -435,90 +124,26 @@ def cancel_hosting_capacity_work_package(self, work_package_id: str): :param work_package_id: The id of the running work package to cancel :return: The HTTP response received from the Evolve App Server after attempting to cancel work package """ - return get_event_loop().run_until_complete(self.async_cancel_hosting_capacity_work_package(work_package_id)) - - async def async_cancel_hosting_capacity_work_package(self, work_package_id: str): - """ - Send asynchronous request to hosting capacity service to cancel a running work package - - :param work_package_id: The id of the running work package to cancel - :return: The HTTP response received from the Evolve App Server after attempting to cancel work package - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation cancelWorkPackage($workPackageId: ID!) 
{ - cancelWorkPackage(workPackageId: $workPackageId) - } - """, - "variables": {"workPackageId": work_package_id} - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.mutation( + Mutation.cancel_work_package(work_package_id=work_package_id), + operation_name="cancelWorkPackage" + ) + ) - def get_hosting_capacity_work_packages_progress(self): + def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this info not returned by get_work_package_by_id ? """ Retrieve running work packages progress information from hosting capacity service :return: The HTTP response received from the Evolve App Server after requesting work packages progress info """ - return get_event_loop().run_until_complete(self.async_get_hosting_capacity_work_packages_progress()) - - async def async_get_hosting_capacity_work_packages_progress(self): - """ - Asynchronously retrieve running work packages progress information from hosting capacity service - - :return: The HTTP response received from the Evolve App Server after requesting work packages progress info - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getWorkPackageProgress { - getWorkPackageProgress { - pending - inProgress { - id - progressPercent - pending - generation - execution - resultProcessing - failureProcessing - complete - } - } - } - """, - "variables": {} - } - if self._verify_certificate: - sslcontext = 
ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + raise NotImplementedError() + return get_event_loop().run_until_complete( + self._gql_client.query( + Query.get_work_packages(), + operation_name="getWorkPackagesProgres", + ) + ) def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): """ @@ -528,62 +153,11 @@ def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoad :return: The HTTP response received from the Evolve App Server after attempting to run work package """ return get_event_loop().run_until_complete( - self.async_run_feeder_load_analysis_report(feeder_load_analysis_input)) - - async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): - """ - Asynchronously send request to evolve app server to run a feeder load analysis study - - :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": - """ - mutation runFeederLoadAnalysis($input: FeederLoadAnalysisInput!) 
{ - runFeederLoadAnalysis(input: $input) - } - """, - "variables": { - "input": { - "feeders": feeder_load_analysis_input.feeders, - "substations": feeder_load_analysis_input.substations, - "subGeographicalRegions": feeder_load_analysis_input.sub_geographical_regions, - "geographicalRegions": feeder_load_analysis_input.geographical_regions, - "startDate": feeder_load_analysis_input.start_date, - "endDate": feeder_load_analysis_input.end_date, - "fetchLvNetwork": feeder_load_analysis_input.fetch_lv_network, - "processFeederLoads": feeder_load_analysis_input.process_feeder_loads, - "processCoincidentLoads": feeder_load_analysis_input.process_coincident_loads, - "produceConductorReport": True, # We currently only support conductor report - "aggregateAtFeederLevel": feeder_load_analysis_input.aggregate_at_feeder_level, - "output": feeder_load_analysis_input.output, - "flaForecastConfig": - ({ - "scenarioID": feeder_load_analysis_input.fla_forecast_config.scenario_id, - "year": feeder_load_analysis_input.fla_forecast_config.year, - "pvUpgradeThreshold": feeder_load_analysis_input.fla_forecast_config.pv_upgrade_threshold, - "bessUpgradeThreshold": feeder_load_analysis_input.fla_forecast_config.bess_upgrade_threshold, - "seed": feeder_load_analysis_input.fla_forecast_config.seed - } if feeder_load_analysis_input.fla_forecast_config else None) - } - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + self._gql_client.mutation( + Mutation.run_feeder_load_analysis(feeder_load_analysis_input), + operation_name="runFeederLoadAnalysisReport" + ) + ) def 
get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): """ @@ -594,174 +168,36 @@ def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ return get_event_loop().run_until_complete( - self.async_get_feeder_load_analysis_report_status(report_id, full_spec)) - - async def async_get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): - """ - Asynchronously send request to evolve app server to retrieve a feeder load analysis report status - - :param report_id: Feeder load analysis report ID - :param full_spec: If true the response will include the request sent to generate the report - :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": - """ - query getFeederLoadAnalysisReportStatus($reportId: ID!, $fullSpec: Boolean!) 
{ - getFeederLoadAnalysisReportStatus(reportId: $reportId, fullSpec: $fullSpec) { - id - name - createdAt - createdBy - completedAt - state - errors - generationSpec { - feeders - substations - subGeographicalRegions - geographicalRegions - startDate - endDate - fetchLvNetwork - processFeederLoads - processCoincidentLoads - produceBasicReport - produceConductorReport - aggregateAtFeederLevel - output - } - } - } - """, - "variables": { - "reportId": report_id, - "fullSpec": full_spec, - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() - - def upload_study(self, study: Study): - """ - Uploads a new study to the Evolve App Server - :param study: An instance of a data class representing a new study - """ - return get_event_loop().run_until_complete(self.async_upload_study(study)) + self._gql_client.query( + Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec), + operation_name="getFeederLoadAnalysisReportStatus", + ) + ) - async def async_upload_study(self, study: Study): + def upload_study(self, study: StudyInput | list[StudyInput]): """ Uploads a new study to the Evolve App Server :param study: An instance of a data class representing a new study - :return: The HTTP response received from the Evolve App Server after attempting to upload the study """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation uploadStudy($study: StudyInput!) 
{ - addStudies(studies: [$study]) - } - """, - "variables": { - "study": { - "name": study.name, - "description": study.description, - "tags": study.tags, - "styles": study.styles, - "results": [{ - "name": result.name, - "geoJsonOverlay": result.geo_json_overlay and { - "data": result.geo_json_overlay.data, - "sourceProperties": result.geo_json_overlay.source_properties, - "styles": result.geo_json_overlay.styles - }, - "stateOverlay": result.state_overlay and { - "data": result.state_overlay.data, - "styles": result.state_overlay.styles - }, - "sections": [{ - "type": section.type, - "name": section.name, - "description": section.description, - "columns": section.columns, - "data": section.data - } for section in result.sections] - } for result in study.results] - } - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.mutation( + Mutation.add_studies(study if isinstance(study, list) else [study]), + operation_name="addStudy", + ) + ) - def run_ingestor(self, run_config: List[IngestorConfigInput]): + def run_ingestor(self, run_config: list[IngestorConfigInput]): """ Send request to perform an ingestor run :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ return get_event_loop().run_until_complete( - self.async_run_ingestor(run_config)) - - async def async_run_ingestor(self, run_config: List[IngestorConfigInput]): - """ - Send asynchronous request to perform an ingestor run - :param run_config: A list of 
IngestorConfigInput - :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation executeIngestor($runConfig: [IngestorConfigInput!]) { - executeIngestor(runConfig: $runConfig) - } - """, - "variables": { - "runConfig": [asdict(x) for x in run_config], - } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + self._gql_client.mutation( + Mutation.execute_ingestor(run_config=run_config), + operation_name="executeIngestor", + ) + ) def get_ingestor_run(self, ingestor_run_id: int): """ @@ -770,53 +206,17 @@ def get_ingestor_run(self, ingestor_run_id: int): :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ return get_event_loop().run_until_complete( - self.async_get_ingestor_run(ingestor_run_id)) - - async def async_get_ingestor_run(self, ingestor_run_id: int): - """ - Send asynchronous request to retrieve the record of a particular ingestor run. - :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. - :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getIngestorRun($id: Int!) 
{ - getIngestorRun(id: $id) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } - } - """, - "variables": { - "id": ingestor_run_id, - } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - raise response.raise_for_status() + self._gql_client.query( + Query.get_ingestor_run(ingestor_run_id), + operation_name="getIngestorRun", + ) + ) - def get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] = None, - query_sort: Optional[IngestorRunsSortCriteriaInput] = None): + def get_ingestor_run_list( + self, + query_filter: IngestorRunsFilterInput | None = None, + query_sort: IngestorRunsSortCriteriaInput | None = None + ): """ Send request to retrieve a list of ingestor run records matching the provided filter parameters. :param query_filter: An `IngestorRunsFilterInput` object. Only records matching the provided values will be returned. @@ -825,72 +225,19 @@ def get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. """ return get_event_loop().run_until_complete( - self.async_get_ingestor_run_list(query_filter, query_sort)) - - async def async_get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] = None, - query_sort: Optional[IngestorRunsSortCriteriaInput] = None): - """ - Send asynchronous request to retrieve a list of ingestor run records matching the provided filter parameters. - :param query_filter: An `IngestorRunsFilterInput` object. 
Only records matching the provided values will be returned. - If not supplied all records will be returned. (Optional) - :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) - :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. - """ - - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } - } - """, - "variables": { - **({"filter": { - "id": query_filter.id, - "status": query_filter.status and [state.name for state in query_filter.status], - "completed": query_filter.completed, - "containerRuntimeType": query_filter.container_runtime_type and [runtime.name for runtime in - query_filter.container_runtime_type] - }} if query_filter else {}), - **({"sort": { - "status": query_sort.status and query_sort.status.name, - "startedAt": query_sort.started_at and query_sort.started_at.name, - "statusLastUpdatedAt": query_sort.status_last_updated_at and query_sort.status_last_updated_at.name, - "completedAt": query_sort.completed_at and query_sort.completed_at.name, - "containerRuntimeType": query_sort.container_runtime_type and query_sort.container_runtime_type.name, - }} if query_sort else {}) - } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if 
response.ok: - return await response.json() - else: - raise response.raise_for_status() + self._gql_client.query( + Query.list_ingestor_runs(filter_=query_filter, sort=query_sort), + operation_name="listIngestorRuns", + ) + ) - def run_hosting_capacity_calibration(self, calibration_name: str, local_calibration_time: datetime, - feeders: Optional[List[str]] = None, - transformer_tap_settings: Optional[str] = None, - generator_config: Optional[GeneratorConfig] = None): + def run_hosting_capacity_calibration( + self, + calibration_name: str, + local_calibration_time: datetime, + feeders: list[str] | None = None, + transformer_tap_settings: str | None = None, + generator_config: HcGeneratorConfigInput | None = None): """ Send request to run hosting capacity calibration :param calibration_name: A string representation of the calibration name @@ -904,72 +251,25 @@ def run_hosting_capacity_calibration(self, calibration_name: str, local_calibrat :return: The HTTP response received from the Evolve App Server after attempting to run the calibration """ - return get_event_loop().run_until_complete( - self.async_run_hosting_capacity_calibration(calibration_name, local_calibration_time, feeders, - transformer_tap_settings, - generator_config)) - - async def async_run_hosting_capacity_calibration(self, calibration_name: str, - calibration_time_local: datetime, - feeders: Optional[List[str]] = None, - transformer_tap_settings: Optional[str] = None, - generator_config: Optional[GeneratorConfig] = None): - """ - Send asynchronous request to run hosting capacity calibration - :param calibration_name: A string representation of the calibration name - :param calibration_time_local: A datetime representation of the calibration time, in the timezone of your pqv data ("model time"). - :param feeders: A list of feeder ID's to run the calibration over. If not supplied then the calibration is run over all feeders in the network. 
- :param transformer_tap_settings: A set of transformer tap settings to apply before running the calibration work package. - If provided, this will take precedence over any 'transformer_tap_settings' supplied in via the generator_config parameter - :param generator_config: A `GeneratorConfig` object that overrides the default values in the `WorkPackageConfig` used by calibration. - Note: The following fields cannot be overridden during calibration: generator_config.model.calibration, generator_config.model.meter_placement_config, generator_config.solve.step_size_minutes, and generator_config.raw_results. - - :return: The HTTP response received from the Evolve App Server after attempting to run the calibration - """ - - # Only replace microsecond, as in database we only have down to second precision. - # tzinfo will be whatever the user passed through, which should be the timezone of their load data. - parsed_time = calibration_time_local.replace(microsecond=0, tzinfo=None) - if transformer_tap_settings: - if generator_config: - if generator_config.model: - generator_config.model.transformer_tap_settings = transformer_tap_settings - else: - generator_config.model = ModelConfig(transformer_tap_settings=transformer_tap_settings) - else: - generator_config = GeneratorConfig(model=ModelConfig(transformer_tap_settings=transformer_tap_settings)) - - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { - runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) - } - """, - "variables": { - "calibrationName": calibration_name, - "calibrationTimeLocal": parsed_time.isoformat(), - "feeders": feeders, - "generatorConfig": 
self.generator_config_to_json(generator_config) - } - } + if generator_config is None: + generator_config = HcGeneratorConfigInput() + if generator_config.model is None: + generator_config.model = HcModelConfigInput() + if generator_config.model: + generator_config.model.transformer_tap_settings = transformer_tap_settings - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - raise response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.mutation( + Mutation.run_calibration( + calibration_name=calibration_name, + calibration_time_local=local_calibration_time, + feeders=feeders, + generator_config=generator_config, + ), + operation_name="runCalibration", + ) + ) def get_hosting_capacity_calibration_run(self, id: str): """ @@ -977,102 +277,31 @@ def get_hosting_capacity_calibration_run(self, id: str): :param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return get_event_loop().run_until_complete(self.async_get_hosting_capacity_calibration_run(id)) - - async def async_get_hosting_capacity_calibration_run(self, id: str): - """ - Retrieve information of a hosting capacity calibration run - :param id: The calibration run ID - :return: The HTTP response received from the Evolve App Server after requesting calibration run info - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getCalibrationRun($id: ID!) 
{ - getCalibrationRun(id: $id) { - id - name - workflowId - runId - calibrationTimeLocal - startAt - completedAt - status - feeders - calibrationWorkPackageConfig - } - } - """, - "variables": { - "id": id - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.query( + Query.get_calibration_run(id), + operation_name="getCalibrationRun", + ) + ) def get_hosting_capacity_calibration_sets(self): """ Retrieve a list of all completed calibration runs initiated through Evolve App Server :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return get_event_loop().run_until_complete(self.async_get_hosting_capacity_calibration_sets()) - - async def async_get_hosting_capacity_calibration_sets(self): - """ - Retrieve a list of all completed calibration runs initiated through Evolve App Server - :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query { - getCalibrationSets - } - """ - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if 
response.ok: - return await response.json() - else: - response.raise_for_status() - - def get_transformer_tap_settings(self, calibration_name: str, feeder: Optional[str] = None, - transformer_mrid: Optional[str] = None): - """ - Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database - :param calibration_name: The (user supplied)name of the calibration run to retrieve transformer tap settings from - :param feeder: An optional filter to apply to the returned list of transformer tap settings - :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid - :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id - """ return get_event_loop().run_until_complete( - self.async_get_transformer_tap_settings(calibration_name, feeder, transformer_mrid)) + self._gql_client.query( + Query.get_calibration_sets(), + operation_name="getCalibrationSets", + ) + ) - async def async_get_transformer_tap_settings(self, calibration_name: str, feeder: Optional[str] = None, - transformer_mrid: Optional[str] = None): + def get_transformer_tap_settings( + self, + calibration_name: str, + feeder: str | None = None, + transformer_mrid: str | None = None + ): """ Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database :param calibration_name: The (user supplied)name of the calibration run to retrieve transformer tap settings from @@ -1080,129 +309,36 @@ async def async_get_transformer_tap_settings(self, calibration_name: str, feeder :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - with warnings.catch_warnings(): - if not self._verify_certificate: - 
warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getTransformerTapSettings($calibrationName: String!, $feeder: String, $transformerMrid: String) { - getTransformerTapSettings(calibrationName: $calibrationName, feeder: $feeder, transformerMrid: $transformerMrid) { - id - highStep - lowStep - nominalTapNum - tapPosition - controlEnabled - stepVoltageIncrement - } - } - """, - "variables": { - "calibrationName": calibration_name, - "feeder": feeder, - "transformerMrid": transformer_mrid - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + self._gql_client.query( + Query.get_transformer_tap_settings( + calibration_name=calibration_name, + feeder=feeder, + transformer_mrid=transformer_mrid + ), + operation_name="getTransformerTapSettings", + ) + ) - def run_opendss_export(self, config: OpenDssConfig): + def run_opendss_export(self, config: OpenDssModelInput): """ Send request to run an opendss export :param config: The OpenDssConfig for running the export :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export """ - return get_event_loop().run_until_complete(self.async_run_opendss_export(config)) - - async def async_run_opendss_export(self, config: OpenDssConfig): - """ - Send asynchronous request to run an opendss export - :param config: The OpenDssConfig for running the export - :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export - """ - with warnings.catch_warnings(): - if not 
self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation createOpenDssModel($input: OpenDssModelInput!) { - createOpenDssModel(input: $input) - } - """, - "variables": { - "input": { - "modelName": config.model_name, - "isPublic": config.is_public, - "generationSpec": { - "modelOptions": { - "feeder": config.feeder, - "scenario": config.scenario, - "year": config.year - }, - "modulesConfiguration": { - "common": { - **({"fixedTime": {"loadTime": config.load_time.load_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - }} if isinstance(config.load_time, FixedTime) else {}), - **({"timePeriod": { - "startTime": config.load_time.start_time.isoformat(), - "endTime": config.load_time.end_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - }} if isinstance(config.load_time, TimePeriod) else {}) - }, - "generator": self.generator_config_to_json(config.generator_config), - } - } - } - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return get_event_loop().run_until_complete( + 
self._gql_client.mutation( + Mutation.create_open_dss_model(config), + operation_name="createOpenDssModel", + ) + ) def get_paged_opendss_models( self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): + limit: int | None = None, + offset: int | None = None, + query_filter: GetOpenDssModelsFilterInput | None = None, + query_sort: GetOpenDssModelsSortCriteriaInput | None = None): """ Retrieve a paginated opendss export run information :param limit: The number of opendss export runs to retrieve @@ -1212,74 +348,11 @@ def get_paged_opendss_models( :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ return get_event_loop().run_until_complete( - self.async_get_paged_opendss_models(limit, offset, query_filter, query_sort)) - - async def async_get_paged_opendss_models( - self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): - """ - Retrieve a paginated opendss export run information - :param limit: The number of opendss export runs to retrieve - :param offset: The number of opendss export runs to skip - :param query_filter: The filter to apply to the query - :param query_sort: The sorting to apply to the query - :return: The HTTP response received from the Evolve App Server after requesting opendss export run information - """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query pagedOpenDssModels($limit: Int, $offset: Long, $filter: GetOpenDssModelsFilterInput, $sort: GetOpenDssModelsSortCriteriaInput) { - pagedOpenDssModels(limit: $limit, offset: $offset, filter: $filter,sort: $sort) { - totalCount - offset, - 
models { - id - name - createdAt - createdBy - state - downloadUrl - isPublic - errors - generationSpec - } - } - } - """, - "variables": { - **({"limit": limit} if limit is not None else {}), - **({"offset": offset} if offset is not None else {}), - **({"filter": { - "name": query_filter.name, - "isPublic": query_filter.is_public, - "state": query_filter.state and [state.name for state in query_filter.state] - }} if query_filter else {}), - **({"sort": { - "name": query_sort.name and query_sort.name.name, - "createdAt": query_sort.created_at and query_sort.created_at.name, - "state": query_sort.state and query_sort.state.name, - "isPublic": query_sort.is_public and query_sort.is_public.name - }} if query_sort else {}) - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + self._gql_client.query( + Query.paged_open_dss_models(limit=limit, offset=offset, filter_=query_filter, sort=query_sort), + operation_name="pagedOpenDssModels", + ) + ) def get_opendss_model_download_url(self, run_id: int): """ @@ -1295,24 +368,15 @@ async def async_get_opendss_model_download_url(self, run_id: int): :param run_id: The opendss export run ID :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.get( - construct_url(protocol=self._protocol, host=self._host, 
port=self._port, - path=f"/api/opendss-model/{run_id}"), - headers=self._get_request_headers(), - ssl=sslcontext if self._verify_certificate else False, - allow_redirects=False - ) as response: - if response.status == HTTPStatus.FOUND: - return response.headers["Location"] - elif not response.ok: - response.raise_for_status() + response = (await self._gql_client.http_client.get( + f"{self._base_url}/api/opendss-model/{run_id}", + headers=self._gql_client.headers, + follow_redirects=False + )) + if response.status_code == HTTPStatus.FOUND: + return response.headers["Location"] + elif not response.ok: + response.raise_for_status() def get_opendss_model(self, model_id: int): """ @@ -1333,7 +397,7 @@ async def async_get_opendss_model(self, model_id: int): page_size = 20 while True: - response = await self.async_get_paged_opendss_models(page_size, offset) + response = self.get_paged_opendss_models(page_size, offset) total_count = int(response["data"]["pagedOpenDssModels"]["totalCount"]) page_count = len(response["data"]["pagedOpenDssModels"]["models"]) for model in response["data"]["pagedOpenDssModels"]["models"]: diff --git a/src/zepben/eas/client/enums.py b/src/zepben/eas/client/enums.py new file mode 100644 index 0000000..f42063c --- /dev/null +++ b/src/zepben/eas/client/enums.py @@ -0,0 +1,16 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +__all__ = ["OpenDssModelState"] + +from enum import Enum + + +class OpenDssModelState(Enum): + COULD_NOT_START = 'COULD_NOT_START' + CREATION = 'CREATION' + COMPLETED = 'COMPLETED' + FAILED = 'FAILED' \ No newline at end of file diff --git a/src/zepben/eas/client/feeder_load_analysis_input.py b/src/zepben/eas/client/feeder_load_analysis_input.py deleted file mode 100644 index efd3411..0000000 --- a/src/zepben/eas/client/feeder_load_analysis_input.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from dataclasses import dataclass -from typing import List, Optional - -__all__ = [ - "FeederLoadAnalysisInput" -] - -from zepben.eas.client.fla_forecast_config import FlaForecastConfig - - -@dataclass -class FeederLoadAnalysisInput: - """ A data class representing the configuration for a feeder load analysis study """ - - start_date: str - """Start date for this analysis""" - - end_date: str - """End date for this analysis""" - - fetch_lv_network: bool - """Whether to stop analysis at distribution transformer""" - - process_feeder_loads: bool - """Whether to include values corresponding to feeder event time points in the report""" - - process_coincident_loads: bool - """Whether to include values corresponding to conductor event time points in the report""" - - aggregate_at_feeder_level: bool - """Request for a report which aggregate all downstream load at the feeder level""" - - output: str - """The file name of the resulting study""" - - feeders: Optional[List[str]] = None - """The mRIDs of feeders to solve for feeder load analysis""" - - substations: Optional[List[str]] = None - """The mRIDs of substations to solve for feeder load analysis""" - - sub_geographical_regions: Optional[List[str]] = None - """The mRIDs of sub-Geographical Region 
to solve for feeder load analysis""" - - geographical_regions: Optional[List[str]] = None - """The mRIDs of Geographical Region to solve for feeder load analysis""" - - fla_forecast_config: Optional[FlaForecastConfig] = None - """The forecast configuration for this fla study""" diff --git a/src/zepben/eas/client/fla_forecast_config.py b/src/zepben/eas/client/fla_forecast_config.py deleted file mode 100644 index 3f17ef5..0000000 --- a/src/zepben/eas/client/fla_forecast_config.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from dataclasses import dataclass -from typing import Optional - -__all__ = [ - "FlaForecastConfig" -] - - -@dataclass -class FlaForecastConfig: - """ A data class representing the configuration for a forecast portion of a feeder load analysis study """ - - scenario_id: str - """The id of forecast scenario""" - - year: int - """The year for forecast model""" - - pv_upgrade_threshold: Optional[int] = 5000 - """Watts threshold to indicate if a customer site will gain additional pv during scenario application (Default to 5000).""" - - bess_upgrade_threshold: Optional[int] = 5000 - """Watts threshold to indicate if a customer site will gain additional battery during scenario application (Default to 5000).""" - - seed: Optional[int] = 123 - """Seed for scenario application (Default to 123).""" diff --git a/src/zepben/eas/client/hc_commons.py b/src/zepben/eas/client/hc_commons.py deleted file mode 100644 index 3cdf482..0000000 --- a/src/zepben/eas/client/hc_commons.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. 
If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - - -__all__ = [ - "STORED_RESULTS_CONFIG_STORE_NONE", - "STORED_RESULTS_CONFIG_STORE_ALL", - "RAW_RESULTS_CONFIG_ALL_RAW_VALUES", - "RAW_RESULTS_CONFIG_STANDARD", - "RAW_RESULTS_CONFIG_BASIC", - "METRICS_RESULTS_CONFIG_CALCULATE_PERFORMANCE_METRICS", -] - -from zepben.eas import StoredResultsConfig, RawResultsConfig, MetricsResultsConfig - -STORED_RESULTS_CONFIG_STORE_NONE = StoredResultsConfig( - energy_meters_raw=False, - energy_meter_voltages_raw=False, - overloads_raw=False, - voltage_exceptions_raw=False -) - -STORED_RESULTS_CONFIG_STORE_ALL = StoredResultsConfig( - energy_meters_raw=True, - energy_meter_voltages_raw=True, - overloads_raw=True, - voltage_exceptions_raw=True -) - -RAW_RESULTS_CONFIG_ALL_RAW_VALUES = RawResultsConfig( - energy_meters_raw=True, - energy_meter_voltages_raw=True, - results_per_meter=True, - overloads_raw=True, - voltage_exceptions_raw=True -) - -RAW_RESULTS_CONFIG_STANDARD = RawResultsConfig( - energy_meters_raw=True, - energy_meter_voltages_raw=True, - results_per_meter=True, - overloads_raw=True, - voltage_exceptions_raw=True -) - -# BASIC everything in RawConfig to false. -# AT THE MOMENT it means that no raw results are going to be generated, -# however there's a task in the backlog to implement generating a summary -# for each meter; so even though raw results are not going to be produced, a -# message with a summary per meter will still be generated. 
-RAW_RESULTS_CONFIG_BASIC = RawResultsConfig() - -METRICS_RESULTS_CONFIG_CALCULATE_PERFORMANCE_METRICS = MetricsResultsConfig( - calculate_performance_metrics=True -) diff --git a/src/zepben/eas/client/ingestor.py b/src/zepben/eas/client/ingestor.py deleted file mode 100644 index d384696..0000000 --- a/src/zepben/eas/client/ingestor.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2025 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from dataclasses import dataclass -from enum import Enum -from typing import Optional, List -from datetime import datetime - -__all__ = [ - "IngestorConfigInput", "IngestorRuntimeKind", "IngestorRunState", "IngestorRun", "IngestorRunsFilterInput", "Order", - "IngestorRunsSortCriteriaInput" -] - - -@dataclass -class IngestorConfigInput: - key: str - value: str - - -class IngestorRuntimeKind(Enum): - AZURE_CONTAINER_APP_JOB = "AZURE_CONTAINER_APP_JOB" - DOCKER = "DOCKER" - ECS = "ECS" - KUBERNETES = "KUBERNETES" - TEMPORAL_KUBERNETES = "TEMPORAL_KUBERNETES" - - -class IngestorRunState(Enum): - INITIALIZED = "INITIALIZED" - QUEUED = "QUEUED" - STARTED = "STARTED" - RUNNING = "RUNNING" - SUCCESS = "SUCCESS" - FAILURE = "FAILURE" - FAILED_TO_START = "FAILED_TO_START" - - -@dataclass -class IngestorRun: - id: int - container_runtime_type: Optional[IngestorRuntimeKind] - payload: str - token: str - status: IngestorRunState - started_at: datetime - status_last_updated_at: Optional[datetime] - completedAt: Optional[datetime] - - -@dataclass -class IngestorRunsFilterInput: - id: Optional[int] = None - status: Optional[List[IngestorRunState]] = None - completed: Optional[bool] = None - container_runtime_type: Optional[List[IngestorRuntimeKind]] = None - - -class Order(Enum): - ASC = "ASC" - DESC = "DESC" - - -@dataclass -class IngestorRunsSortCriteriaInput: - status: 
Optional[Order] = None - started_at: Optional[Order] = None - status_last_updated_at: Optional[Order] = None - completed_at: Optional[Order] = None - container_runtime_type: Optional[Order] = None diff --git a/src/zepben/eas/client/opendss.py b/src/zepben/eas/client/opendss.py deleted file mode 100644 index 91838f7..0000000 --- a/src/zepben/eas/client/opendss.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2025 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from dataclasses import dataclass -from datetime import tzinfo -from enum import Enum -from typing import Union, Optional, List - -from zepben.eas.client.work_package import GeneratorConfig, TimePeriod, FixedTime - -__all__ = [ - "OpenDssConfig", - "OpenDssModelState", - "GetOpenDssModelsFilterInput", - "Order", - "GetOpenDssModelsSortCriteriaInput" -] - - -@dataclass -class OpenDssConfig: - """ A data class representing the configuration for an opendss export """ - scenario: str - year: int - feeder: str - load_time: Union[TimePeriod, FixedTime] - generator_config: Optional[GeneratorConfig] = None - model_name: Optional[str] = None - is_public: Optional[bool] = None - - -class OpenDssModelState(Enum): - COULD_NOT_START = "COULD_NOT_START" - CREATION = "CREATION" - COMPLETED = "COMPLETED" - FAILED = "FAILED" - - -@dataclass -class GetOpenDssModelsFilterInput: - """ A data class representing the filter to apply to the opendss export run paginated query """ - name: Optional[str] = None - is_public: Optional[int] = None - state: Optional[List[OpenDssModelState]] = None - - -class Order(Enum): - ASC = "ASC" - DESC = "DESC" - - -@dataclass -class GetOpenDssModelsSortCriteriaInput: - """ A data class representing the sort criteria to apply to the opendss export run paginated query """ - name: Optional[Order] = None - created_at: 
Optional[Order] = None - state: Optional[Order] = None - is_public: Optional[Order] = None diff --git a/src/zepben/eas/client/patched_generated_client.py b/src/zepben/eas/client/patched_generated_client.py new file mode 100644 index 0000000..728a16b --- /dev/null +++ b/src/zepben/eas/client/patched_generated_client.py @@ -0,0 +1,38 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +from typing import Any, cast + +import httpx +from ariadne_codegen.client_generators.dependencies.exceptions import GraphQLClientHttpError, \ + GraphQLClientInvalidResponseError, GraphQLClientGraphQLMultiError + +from zepben.eas.lib.generated_graphql_client import Client + + +class PatchedClient(Client): + + def get_data(self, response: httpx.Response) -> dict[str, Any]: + if not response.is_success: + raise GraphQLClientHttpError( + status_code=response.status_code, response=response + ) + + try: + response_json = response.json() + except ValueError as exc: + raise GraphQLClientInvalidResponseError(response=response) from exc + + try: + errors = response_json.get("errors") + except AttributeError: + errors = None + + if errors: + raise GraphQLClientGraphQLMultiError.from_errors_dicts( + errors_dicts=errors, data=response_json + ) + + return cast(dict[str, Any], response_json) diff --git a/src/zepben/eas/client/study.py b/src/zepben/eas/client/study.py deleted file mode 100644 index 9a9e05a..0000000 --- a/src/zepben/eas/client/study.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -from dataclasses import dataclass, field -from typing import List, Any - -from geojson import GeoJSON - -__all__ = ["GeoJsonOverlay", "StateOverlay", "Section", "Result", "Study"] - - -@dataclass -class GeoJsonOverlay: - """ A data class representing an Evolve App Server study result GeoJSON overlay """ - data: GeoJSON - styles: List[str] - source_properties: Any = None - - -@dataclass -class StateOverlay: - """ A data class representing an Evolve App Server study result state overlay """ - data: None - styles: List[str] - - -@dataclass -class Section: - """ A data class representing an Evolve App Server study result data section """ - type: str - name: str - description: str - columns: Any - data: Any - - -@dataclass -class Result: - """ A data class representing an Evolve App Server study result """ - name: str - geo_json_overlay: GeoJsonOverlay = None - state_overlay: StateOverlay = None - sections: List[Section] = field(default_factory=lambda: []) - - -@dataclass -class Study: - """ A data class representing an Evolve App Server study """ - name: str - description: str - tags: List[str] - results: List[Result] - styles: List[Any] diff --git a/src/zepben/eas/client/util.py b/src/zepben/eas/client/util.py deleted file mode 100644 index e3c52b8..0000000 --- a/src/zepben/eas/client/util.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -__all__ = ["construct_url"] - -from typing import Union - - -def construct_url(protocol, host, path, port: Union[str, int] = None) -> str: - return f"{protocol}://{host}{f':{port}' if port else ''}{path}" diff --git a/src/zepben/eas/client/work_package.py b/src/zepben/eas/client/work_package.py deleted file mode 100644 index 6343933..0000000 --- a/src/zepben/eas/client/work_package.py +++ /dev/null @@ -1,995 +0,0 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from dataclasses import dataclass -from datetime import datetime -from enum import Enum -from typing import List, Optional, Union, Dict - -__all__ = [ - "SwitchClass", - "SwitchMeterPlacementConfig", - "CandidateGenerationConfig", - "CandidateGenerationType", - "DvmsConfig", - "FixedTime", - "TimePeriod", - "InterventionClass", - "InterventionConfig", - "LoadPlacement", - "FeederScenarioAllocationStrategy", - "MeterPlacementConfig", - "PVVoltVARVoltWattConfig", - "ModelConfig", - "SolveMode", - "SolveConfig", - "RawResultsConfig", - "MetricsResultsConfig", - "StoredResultsConfig", - "GeneratorConfig", - "RegulatorConfig", - "ResultProcessorConfig", - "WorkPackageConfig", - "WorkPackageProgress", - "WorkPackagesProgress", - "EnhancedMetricsConfig", - "WriterType", - "WriterOutputConfig", - "WriterConfig", - "YearRange", - "FixedTimeLoadOverride", - "TimePeriodLoadOverride", - "ForecastConfig", - "FeederConfig", - "FeederConfigs", - "NodeLevelResultsConfig" -] - - -class SwitchClass(Enum): - BREAKER = "BREAKER", - DISCONNECTOR = "DISCONNECTOR", - FUSE = "FUSE", - JUMPER = "JUMPER", - LOAD_BREAK_SWITCH = "LOAD_BREAK_SWITCH", - RECLOSER = "RECLOSER" - - -@dataclass -class SwitchMeterPlacementConfig: - meter_switch_class: Optional[SwitchClass] = None - """The CIM class of Switch to create meters at""" - - 
name_pattern: Optional[str] = None - """ - A Regex pattern to match on for Switch names. - The IdentifiedObject.name field will be used when matching against switches of the corresponding Switch class. - """ - - -@dataclass -class FixedTimeLoadOverride: - load_watts: Optional[List[float]] - """ - The readings to be used to override load watts - """ - - gen_watts: Optional[List[float]] - """ - The readings to be used to override gen watts - """ - - load_var: Optional[List[float]] - """ - The readings to be used to override load var - """ - - gen_var: Optional[List[float]] - """ - The readings to be used to override gen var - """ - - # def __str__(self): - - -@dataclass -class TimePeriodLoadOverride: - load_watts: Optional[List[float]] - """ - A list of readings to be used to override load watts. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in load_var, and the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - gen_watts: Optional[List[float]] - """ - A list of readings to be used to override gen watts. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in gen_var, and the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - load_var: Optional[List[float]] - """ - A list of readings to be used to override load var. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in load_watts, and the expected number for the configured load_interval_length_hours. 
- For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - gen_var: Optional[List[float]] - """ - A list of readings to be used to override gen var. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in gen_watts, and the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - -class FixedTime: - """ - A single point in time to model. Should be precise to the minute, and load data must be - present for the provided time in the load database for accurate results. - """ - - def __init__(self, load_time: datetime, load_overrides: Optional[Dict[str, FixedTimeLoadOverride]] = None): - self.load_time = load_time.replace(second=0, microsecond=0, tzinfo=None) - self.load_overrides = load_overrides - - -class TimePeriod: - """ - A time period to model, from a start time to an end time. Maximum of 1 year. - - Load data must be available in the load database between the provided start and end time for accurate results. 
- """ - - def __init__( - self, - start_time: datetime, - end_time: datetime, - load_overrides: Optional[Dict[str, TimePeriodLoadOverride]] = None - ): - self._validate(start_time, end_time) - self.start_time = start_time.replace(second=0, microsecond=0, tzinfo=None) - self.end_time = end_time.replace(second=0, microsecond=0, tzinfo=None) - self.load_overrides = load_overrides - - @staticmethod - def _validate(start_time: datetime, end_time: datetime): - ddelta = (end_time - start_time).days - - if ddelta > 367: - raise ValueError("The difference between 'start_time' and 'end_time' cannot be greater than a year.") - - if ddelta < 1: - raise ValueError("The difference between 'start_time' and 'end_time' cannot be less than a day.") - - if ddelta < 0: - raise ValueError("The 'start_time' must be before 'end_time'.") - - -class LoadPlacement(Enum): - PER_ENERGY_CONSUMER = "PER_ENERGY_CONSUMER" - PER_USAGE_POINT = "PER_USAGE_POINT" - - -class FeederScenarioAllocationStrategy(Enum): - RANDOM = "RANDOM" - ADDITIVE = "ADDITIVE" - - -@dataclass -class MeterPlacementConfig: - feeder_head: Optional[bool] = None - """Whether to place a meter at the voltage source at the feeder head.""" - - dist_transformers: Optional[bool] = None - """Whether to place a meter at the secondary winding of each distribution transformer.""" - - switch_meter_placement_configs: Optional[List[SwitchMeterPlacementConfig]] = None - """Specifies which switch classes to place meters at, and the regex pattern to match for in the switch names.""" - - energy_consumer_meter_group: Optional[str] = None - """The ID of the meter group to use for populating EnergyMeters at EnergyConsumers.""" - - -@dataclass -class PVVoltVARVoltWattConfig: - cut_off_date: Optional[datetime] = None - """Optional cut-off date to determine which profile to apply to equipment during translation to the OpenDss model. 
- If supplied, the "commissionedDate" of the equipment is compared against this date, equipment that do not have a - "commissionedDate" will receive the [before_cut_off_profile]. If null, the [after_cut_off_profile] profile is applied to all equipment.""" - - before_cut_off_profile: Optional[str] = None - """Optional name of the profile to apply to equipment with a "commissionedDate" before [cut_off_date]. - If null the equipment will be translated into a regular Generator the rather a PVSystem.""" - - after_cut_off_profile: Optional[str] = None - """Optional name of the profile to apply to equipment with a "commissionedDate" after [cut_off_date]. - If null the equipment will be translated into a regular Generator the rather a PVSystem.""" - - -@dataclass -class ModelConfig: - vm_pu: Optional[float] = None - """Voltage per-unit of voltage source.""" - - load_vmin_pu: Optional[float] = None - """ - Minimum per unit voltage for which the load model selected is assumed to apply. Below this value, the load model reverts to a constant impedance model. - """ - - load_vmax_pu: Optional[float] = None - """ - Maximum per unit voltage for which the load model selected is assumed to apply. Above this value, the load model reverts to a constant impedance model. - """ - - gen_vmin_pu: Optional[float] = None - """ - Minimum per unit voltage for which the generator model is assumed to apply. Below this value, the gen model reverts to a constant impedance model. - For generator model used, this is used to determine the upper current limit. For example, if Vminpu is 0.90 then the current limit is (1/0.90) = 111%. - """ - - gen_vmax_pu: Optional[float] = None - """ - Maximum per unit voltage for which the generator model is assumed to apply. Above this value, the gen model reverts to a constant impedance model. - """ - - load_model: Optional[int] = None - """ - Specifies how loads and generators in OpenDSS should be modelled. Options: - 1: Standard constant P+jQ load. 
(Default) - 2: Constant impedance load. - 3: Const P, Quadratic Q (like a motor). - 4: Nominal Linear P, Quadratic Q (feeder mix). Use this with CVRfactor. - 5: Constant Current Magnitude - 6: Const P, Fixed Q - 7: Const P, Fixed Impedance Q - """ - - collapse_swer: Optional[bool] = None - """Whether to collapse/simplify SWER network.""" - - calibration: Optional[bool] = None - """ - Whether to apply calibration modifications to the model. This will create point-in-time models using PQ data. - """ - - p_factor_base_exports: Optional[float] = None - """ - Power factor to set for base model Generators during model translation. If null the model will use the reactive power specified in the load profiles. - """ - - p_factor_forecast_pv: Optional[float] = None - """ - Power factor to set for scenario (forecast) model Generators during model translation. - """ - - p_factor_base_imports: Optional[float] = None - """ - Power factor to set for base model Loads during model translation. If null the model will use the reactive power specified in the load profiles. - """ - - fix_single_phase_loads: Optional[bool] = None - """ - Finds consumers that have a peak load (within the modelled time period) greater than the configured max_single_phase_load value (default 30kW), and upgrades - them to three-phase loads. The intent is to correct data inaccuracies where the number of phases reported for a consumer appears to be incorrect. - By default, we expect a 30kW load would not appear on a single phase consumer, so we upgrade them to three-phase. This consists of tracing upstream to the - distribution transformer and spreading 3 phases (ABCN) back to the transformer where possible. - """ - - max_single_phase_load: Optional[float] = None - """ - The max peak load for a single phase customer, beyond which will trigger the single phase load fixing algorithm mentioned above. 
- """ - - fix_overloading_consumers: Optional[bool] = None - """ - Finds consumers that have peak load or generation (within the modelled time period) greater than the capacity of the transformer they are attached to by a - configurable factor, and then reconfigures them to be HV consumers (attached above the transformer). The aim is to identify HV consumers that have been - incorrectly connected as LV consumers, and resolve this connectivity. - """ - - max_load_tx_ratio: Optional[float] = None - """ - The maximum load to transformer rating ratio for a single consumer to trigger the overloading consumer fixer. - For example given a ratio of 2, if a customer with a peak 30kW load was downstream of a 10kVA transformer, this would be a ratio of 3:1 and thus trigger - the overloading consumers fixer. - """ - - max_gen_tx_ratio: Optional[float] = None - """ - The maximum generation to transformer rating ratio for a single consumer to trigger the overloading consumer fixer. - For example given a ratio of 2, if a customer with peak generation of 30kW was downstream of a 10kVA transformer, this would be a ratio of 3:1 and thus - trigger the overloading consumers fixer. - """ - - fix_undersized_service_lines: Optional[bool] = None - """ - Finds consumers that have a peak load (within the modelled time period) greater than the capacity of the service line of the consumer by some configured - factor. The intent is to find service lines that have unrealistically low current ratings which would stop convergence, and upgrade them to sensible - ratings. - - When a conductors rating is upgraded, we also then upgrade the impedances to a type in line with the new rating, utilising a pre-configured - catalogue of rating and impedance data, and matching the phase configuration of the consumer. - """ - - max_load_service_line_ratio: Optional[float] = None - """ - The maximum load to service line rating ratio to trigger the undersized service lines fixer. 
For example given a ratio of 2, if a customer with peak - load of 10kW had a service line supporting only 5kVA, this would be a ratio of 2:1 and thus - trigger the undersized service line fixer. - - Note service lines are generally considered to be the conductors immediately connecting to a consumer. - """ - - max_load_lv_line_ratio: Optional[float] = None - """ - The maximum load to LV line rating ratio to trigger the undersized service lines fixer for LV conductors. For example given a ratio of 5, if a customer - with peak load of 50kW was connected to LV backbone conductors supporting only 10kVA, this would be a ratio of 5:1 and thus - trigger the undersized service line fixer for the LV conductors. - - Note the LV line fixer will fix all conductors upstream of the consumer up to the distribution transformer they are connected to. - """ - - simplify_network: Optional[bool] = None - """ - Flag to control whether to simplify the network model before translation. - """ - - collapse_lv_networks: Optional[bool] = None - """Flag to control whether to collapse lv network in the model.""" - - collapse_negligible_impedances: Optional[bool] = None - """ - Flag to control whether to collapse conductors with negligible impedance during network simplification. - """ - - combine_common_impedances: Optional[bool] = None - """ - Flag to control whether to combine conductors with common impedance during network simplification. - """ - - feeder_scenario_allocation_strategy: Optional[FeederScenarioAllocationStrategy] = None - """ - Strategy for scenario ev, pv and bess allocation. ADDITIVE will be each year is built upon the last years allocation, - while RANDOM will be a different allocation every year. - """ - - closed_loop_v_reg_enabled: Optional[bool] = None - """Create models with a Closed Loop Voltage Regulator at the Zone sub. 
If false, existing voltage regulator's in the zone sub will be used.""" - - closed_loop_v_reg_replace_all: Optional[bool] = None - """ - Replace all existing Voltage Regulators with Closed Loop Voltage Regulator. If false existing zone sub regulators will be - modelled as-is which may be in non-closed loop configuration. - """ - - closed_loop_v_reg_set_point: Optional[float] = None - """Scaling factor for the base voltage to form the set point (0.0-2.0).""" - - closed_loop_v_band: Optional[float] = None - """VBand value in percentage.""" - - closed_loop_time_delay: Optional[int] = None - """Time delay in seconds.""" - - closed_loop_v_limit: Optional[float] = None - """Maximum voltage at regulating transformer's secondary bus.""" - - default_tap_changer_time_delay: Optional[int] = None - """Time delay in seconds for the default tap changer""" - - default_tap_changer_set_point_pu: Optional[float] = None - """Default tap changer set point""" - - default_tap_changer_band: Optional[float] = None - """Default tap changer band value""" - - split_phase_default_load_loss_percentage: Optional[float] = None - """ - Default load loss percentage for split phase transformers. - """ - - split_phase_lv_kv: Optional[float] = None - - swer_voltage_to_line_voltage: Optional[List[List[int]]] = None - """ - Mapping of SWER voltages to L2L voltages. - """ - - load_placement: Optional[LoadPlacement] = None - """ - Where to create loads - either for each UsagePoint or for each EnergyConsumer. - """ - - load_interval_length_hours: Optional[float] = None - """ - Fraction of an hour for load data. 1.0 = 60 minute intervals, 0.5 = 30 minute intervals. - """ - - meter_placement_config: Optional[MeterPlacementConfig] = None - """Configuration to determine where to place EnergyMeters for collecting results""" - - seed: Optional[int] = None - """A seed to use when generating the model. 
Re-using the same seed will result in the same model being generated.""" - - default_load_watts: Optional[List[float]] = None - """ - A list of readings to be used as default load watts when no load data is found. - Can be either a yearly or daily profile. - The number of entries must match the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - default_gen_watts: Optional[List[float]] = None - """ - A list of readings to be used as default gen watts when no load data is found. - Can be either a yearly or daily profile. - The number of entries must match the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - default_load_var: Optional[List[float]] = None - """ - A list of readings to be used as default load car when no load data is found. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in default_load_watts, and the expected number for the configured load_interval_length_hours. - For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - default_gen_var: Optional[List[float]] = None - """ - A list of readings to be used as default gen var when no load data is found. - Can be either a yearly or daily profile. - The number of entries must match the number of entries in default_gen_watts, and the expected number for the configured load_interval_length_hours. 
- For load_interval_length_hours: - 0.25: 96 entries for daily and 35040 for yearly - 0.5: 48 entries for daily and 17520 for yearly - 1.0: 24 entries for daily and 8760 for yearly - """ - - transformer_tap_settings: Optional[str] = None - """ - The name of the set of distribution transformer tap settings to be applied to the model from an external source. - """ - - ct_prim_scaling_factor: Optional[float] = None - """ - Optional setting for scaling factor of calculated CTPrim for zone sub transformers. - """ - - use_span_level_threshold: bool = False - """ - Set to true if `AcLineSegment.designRating` is to be used for conductor rated current in the model. - """ - - rating_threshold: Optional[float] = None - """ - Optional setting to loosen rated current comparison between conductors during network simplification by providing a threshold - of allowed differences. Neighbouring conductors within this threshold and matching impedance's will be collapsed. - Set as a % value, i.e put as 50.0 if threshold is 50% - """ - - simplify_plsi_threshold: Optional[float] = None - """ - Optional setting to indicate if sequence impedance's should be normalized during network simplification. - Connected AcLineSegments with PerLengthSequenceImpedance value differences within the threshold will be normalized. - Set as a % value, i.e put as 50.0 if threshold is 50% - """ - - emerg_amp_scaling: Optional[float] = None - """ - Scaling factor for emergency current rating for conductors. - Set as a factor value, i.e put as 1.5 if scaling is 150% - """ - - inverter_control_config: Optional[PVVoltVARVoltWattConfig] = None - """ - Optional configuration object to enable modelling generation equipment as PVSystems controlled by InvControls rather than Generators. 
- """ - - -class SolveMode(Enum): - YEARLY = "YEARLY" - DAILY = "DAILY" - - -@dataclass -class SolveConfig: - norm_vmin_pu: Optional[float] = None - norm_vmax_pu: Optional[float] = None - emerg_vmin_pu: Optional[float] = None - emerg_vmax_pu: Optional[float] = None - base_frequency: Optional[int] = None - voltage_bases: Optional[List[float]] = None - - max_iter: Optional[int] = None - """Max iterations before failing""" - - max_control_iter: Optional[int] = None - """Max control iterations before failing""" - - mode: Optional[SolveMode] = None - """Run OpenDSS in yearly or daily mode""" - - step_size_minutes: Optional[float] = None - """The step size to solve""" - - -@dataclass -class RawResultsConfig: - """ - Whether to produce raw results generated from OpenDSS. - You will likely always want defaults for this, as setting any of these to False will limit - the results you get and should only be used as a potential performance optimisation if they are unnecessary. - """ - - energy_meter_voltages_raw: Optional[bool] = None - """ - Produce energy meter voltages results. - """ - - energy_meters_raw: Optional[bool] = None - """ - Produce energy meter results. - """ - - results_per_meter: Optional[bool] = None - """ - Produce results per EnergyMeter - """ - - overloads_raw: Optional[bool] = None - """ - Produce overloads - """ - - voltage_exceptions_raw: Optional[bool] = None - """ - Produce voltage exceptions - """ - - -@dataclass -class MetricsResultsConfig: - """ - Calculated metrics based off the raw results - """ - - calculate_performance_metrics: Optional[bool] = None - """Whether to calculate basic performance metrics""" - - -@dataclass -class StoredResultsConfig: - """ - The raw results that will be stored. - Note storing raw results will utilise a lot of storage space and should be avoided for - large runs. 
- """ - - energy_meter_voltages_raw: Optional[bool] = None - """ - WARNING: Will store a significant amount of data - Store the raw EnergyMeter timeseries voltage results - """ - - energy_meters_raw: Optional[bool] = None - """ - WARNING: Will store a significant amount of data - Store the raw EnergyMeter timeseries results - """ - - overloads_raw: Optional[bool] = None - """ - WARNING: Will store a significant amount of data - Store the raw overload results - """ - - voltage_exceptions_raw: Optional[bool] = None - """ - WARNING: Will store a significant amount of data - Store the raw voltage exception results - """ - - -@dataclass -class NodeLevelResultsConfig: - """ - Configuration settings for node level results. - """ - - collect_voltage: Optional[bool] = None - """ - Include voltage values in node level results. - """ - - collect_current: Optional[bool] = None - """ - Include current values in node level results - """ - - collect_power: Optional[bool] = None - """ - Include power values in node level results - """ - - mrids_to_collect: Optional[List[str]] = None - """ - A list of MRID's to collect node level results at. Note: Depending on the network simplification - and translation these mrid's may not exist in the final OpenDss and no results will be collected. - """ - - collect_all_switches: Optional[bool] = None - """ - Collect node level results at all switches in the network. - """ - - collect_all_transformers: Optional[bool] = None - """ - Collect node level results at all transformers in the network. - """ - - collect_all_conductors: Optional[bool] = None - """ - Collect node level results at all conductors in the network. - """ - - collect_all_energy_consumers: Optional[bool] = None - """ - collect node level results at all energy consumers in the network. - """ - - -@dataclass -class GeneratorConfig: - """ - Configuration settings for the OpenDSS model. 
- These settings make changes to the network and specific OpenDSS settings prior to model execution. - """ - - model: Optional[ModelConfig] = None - solve: Optional[SolveConfig] = None - raw_results: Optional[RawResultsConfig] = None - node_level_results: Optional[NodeLevelResultsConfig] = None - - -@dataclass -class EnhancedMetricsConfig: - populate_enhanced_metrics: Optional[bool] = None - populate_enhanced_metrics_profile: Optional[bool] = None - populate_duration_curves: Optional[bool] = None - populate_constraints: Optional[bool] = None - populate_weekly_reports: Optional[bool] = None - calculate_normal_for_load_thermal: Optional[bool] = None - calculate_emerg_for_load_thermal: Optional[bool] = None - calculate_normal_for_gen_thermal: Optional[bool] = None - calculate_emerg_for_gen_thermal: Optional[bool] = None - calculate_co2: Optional[bool] = None - - -class WriterType(Enum): - POSTGRES = "POSTGRES", - PARQUET = "PARQUET" - - -@dataclass -class WriterOutputConfig: - enhanced_metrics_config: Optional[EnhancedMetricsConfig] = None - - -@dataclass -class WriterConfig: - writer_type: Optional[WriterType] = None - """ - Whether to write output to Parquet files or a Postgres database. - Check with your administrator which result types are supported. - """ - - output_writer_config: Optional[WriterOutputConfig] = None - """The results to store""" - - -@dataclass -class ResultProcessorConfig: - """ - Configuration specific to processing of results. 
- """ - - stored_results: Optional[StoredResultsConfig] = None - """Raw results to be stored""" - - metrics: Optional[MetricsResultsConfig] = None - """Whether to calculate and store basic performance metrics""" - - writer_config: Optional[WriterConfig] = None - """Where results should be stored (Parquet or Postgres) and which metrics to store""" - - -@dataclass -class YearRange: - min_year: int - max_year: int - - -@dataclass -class InterventionClass(Enum): - TARIFF_REFORM = "TARIFF_REFORM", - CONTROLLED_LOAD_HOT_WATER = "CONTROLLED_LOAD_HOT_WATER", - COMMUNITY_BESS = "COMMUNITY_BESS", - DISTRIBUTION_TX_OLTC = "DISTRIBUTION_TX_OLTC", - LV_STATCOMS = "LV_STATCOMS", - DVMS = "DVMS", - PHASE_REBALANCING = "PHASE_REBALANCING", - DISTRIBUTION_TAP_OPTIMIZATION = "DISTRIBUTION_TAP_OPTIMIZATION", - UNKNOWN = "UNKNOWN" - - -class CandidateGenerationType(Enum): - CRITERIA = "CRITERIA", - TAP_OPTIMIZATION = "TAP_OPTIMIZATION" - - -@dataclass -class CandidateGenerationConfig: - type: CandidateGenerationType - """The type of method for generating the intervention candidates.""" - - intervention_criteria_name: Optional[str] = None - """ - The ID of the set of criteria used to select intervention candidates from enhanced metrics of the - base work package run. Only used when type is CRITERIA. - """ - - average_voltage_spread_threshold: Optional[int] = None - """ - The threshold for average line voltage spread under the transformer over the year, in volts. - Voltage spread at each timestep is calculated by taking the difference between the maximum and minimum phase-to-phase voltage over - the nodes under the transformer, for each phase, then taking the maximum of that difference across all phases. - When the average voltage spread exceeds this threshold, it indicates that the transformer is experiencing a - significant voltage swing that may impact system stability. Only used when type is TAP_OPTIMIZATION. 
- """ - - voltage_under_limit_hours_threshold: Optional[int] = None - """ - The threshold for number of hours a transformer is below the nominal voltage range. - Only used when type is TAP_OPTIMIZATION. - """ - - voltage_over_limit_hours_threshold: Optional[int] = None - """ - The threshold for number of hours a transformer is above the nominal voltage range. - Only used when type is TAP_OPTIMIZATION. - """ - - tap_weighting_factor_lower_threshold: Optional[float] = None - """ - The minimum threshold for the tap weighting factor, used to determine when a positive tap adjustment - (increasing voltage) is prioritized. If the tap weighting factor falls below this threshold, it indicates that - the voltage is significantly under the desired range and requires corrective action. This setting is usually - negative. Only used when type is TAP_OPTIMIZATION. - """ - - tap_weighting_factor_upper_threshold: Optional[float] = None - """ - The maximum threshold for the tap weighting factor, used to determine when a negative tap adjustment - (decreasing voltage) is prioritized. If the tap weighting factor exceeds this threshold, it indicates that - the voltage is significantly over the desired range and requires corrective action. This setting is usually - positive. Only used when type is TAP_OPTIMIZATION. - """ - - -@dataclass -class PhaseRebalanceProportions: - a: float - b: float - c: float - - -@dataclass -class RegulatorConfig: - pu_target: float - """Voltage p.u. 
to move the average customer voltage towards.""" - - pu_deadband_percent: float - """Width of window of voltages considered acceptable for the average customer voltage, in %p.u.""" - - max_tap_change_per_step: int - """The maximum number of tap steps to move (in either direction) for each timestep.""" - - allow_push_to_limit: bool - """ - If this is true, we allow the regulator to push some number of customers outside the specified limits for DVMS, - with the limit of customers given by lower_percentile and upper_percentile in DvmsConfig. - """ - - -@dataclass -class DvmsConfig: - lower_limit: float - """The lower limit of voltage (p.u.) considered acceptable for the purposes of DVMS.""" - - upper_limit: float - """The lower limit of voltage (p.u.) considered acceptable for the purposes of DVMS.""" - - lower_percentile: float - """The lowest percentile of customers' voltages to consider when applying DVMS.""" - - upper_percentile: float - """The highest percentile of customers' voltages to consider when applying DVMS.""" - - max_iterations: int - """The number of iterations to attempt DVMS for each timestep before moving on.""" - - regulator_config: RegulatorConfig - """Configures the voltage regulator to apply if the zone is already satisfactory according to the above limits.""" - - -@dataclass -class InterventionConfig: - base_work_package_id: str - """ - ID of the work package that this intervention is based on. - The new work package should process a subset of its feeders, scenarios, and years. - """ - - intervention_type: InterventionClass - """The class of intervention to apply.""" - - year_range: Optional[YearRange] = None - """ - The range of years to search for and apply interventions. - All years within this range should be included in the work package. 
- """ - - allocation_limit_per_year: Optional[int] = None - """The maximum number of interventions that can be applied per year.""" - - candidate_generation: Optional[CandidateGenerationConfig] = None - """ - The method of generating candidates for the intervention. - This does not need to be specified for certain interventions, e.g. PHASE_REBALANCING. - """ - - allocation_criteria: Optional[str] = None - """The ID of the set of criteria used to select an intervention instance for each candidate.""" - - specific_allocation_instance: Optional[str] = None - """ - The specific instance of intervention to use for every allocation. If this is unspecified, - all instances of the intervention class will be considered when choosing one for each candidate. - """ - - phase_rebalance_proportions: Optional[PhaseRebalanceProportions] = None - """ - The proportions to use for phase rebalancing. - If this is unspecified and intervention_type = PHASE_REBALANCING, phases will be rebalanced to equal proportions. - """ - - dvms: Optional[DvmsConfig] = None - """The config for DVMS. This must be specified if intervention_type = DVMS.""" - - -@dataclass -class ForecastConfig(object): - feeders: List[str] - """The feeders to process in this work package""" - - years: List[int] - """ - The years to process for the specified feeders in this work package. - The years should be configured in the input database forecasts for all supplied scenarios. - """ - - scenarios: List[str] - """ - The scenarios to model. These should be configured in the input.scenario_configuration table. - """ - - load_time: Union[TimePeriod, FixedTime] - """ - The time to use for the base load data. The provided time[s] must be available in the - load database for accurate results. Specifying an invalid time (i.e one with no load data) will - result in inaccurate results. 
- """ - - -@dataclass -class FeederConfig(object): - feeder: str - """The feeder to process in this work package""" - - years: List[int] - """ - The years to process for the specified feeders in this work package. - The years should be configured in the input database forecasts for all supplied scenarios. - """ - - scenarios: List[str] - """ - The scenarios to model. These should be configured in the input.scenario_configuration table. - """ - - load_time: Union[TimePeriod, FixedTime] - """ - The time to use for the base load data. The provided time[s] must be available in the - load database for accurate results. Specifying an invalid time (i.e one with no load data) will - result in inaccurate results. - """ - - -@dataclass -class FeederConfigs(object): - configs: list[FeederConfig] - """The feeder to process in this work package""" - - -@dataclass -class WorkPackageConfig: - """ A data class representing the configuration for a hosting capacity work package """ - name: str - syf_config: Union[ForecastConfig, FeederConfigs] - """ - The configuration of the scenario, years, and feeders to run. Use ForecastConfig - for the same scenarios and years applied across all feeders, and the more in depth FeederConfig - if configuration varies per feeder. 
- """ - - quality_assurance_processing: Optional[bool] = None - """Whether to enable QA processing""" - - generator_config: Optional[GeneratorConfig] = None - """Configuration for the OpenDSS model generator""" - - executor_config: Optional[object] = None - """Executor config - currently unused.""" - - result_processor_config: Optional[ResultProcessorConfig] = None - """Configuration for processing and storing results""" - - intervention: Optional[InterventionConfig] = None - """Configuration for applying an intervention""" - - -@dataclass -class WorkPackageProgress: - id: str - progress_percent: int - pending: List[str] - generation: List[str] - execution: List[str] - result_processing: List[str] - failure_processing: List[str] - complete: List[str] - - -@dataclass -class WorkPackagesProgress: - pending: List[str] - in_progress: List[WorkPackageProgress] diff --git a/src/zepben/eas/lib/__init__.py b/src/zepben/eas/lib/__init__.py new file mode 100644 index 0000000..4a7146f --- /dev/null +++ b/src/zepben/eas/lib/__init__.py @@ -0,0 +1,5 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
diff --git a/src/zepben/eas/lib/generated_graphql_client/__init__.py b/src/zepben/eas/lib/generated_graphql_client/__init__.py new file mode 100644 index 0000000..91b3738 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/__init__.py @@ -0,0 +1,222 @@ +# Generated by ariadne-codegen + +from .async_base_client import AsyncBaseClient +from .base_model import BaseModel, Upload +from .client import Client +from .enums import ( + CandidateGenerationType, + ColumnGroup, + ColumnName, + ContainerType, + DaysRequired, + DiffType, + HcFeederScenarioAllocationStrategy, + HcLoadPlacement, + HcSolveMode, + HcSwitchClass, + HcWriterType, + HostingCapacityFileType, + IngestorRunState, + IngestorRuntimeKind, + InterventionClass, + MeasurementZoneType, + OpportunitiesNeed, + OpportunitiesType, + SectionType, + SerializationType, + SincalFileType, + SortOrder, + VariantFileType, + VariantStatus, + VariantWorkflowStatus, + WorkflowStatus, + WorkPackageState, +) +from .exceptions import ( + GraphQLClientError, + GraphQLClientGraphQLError, + GraphQLClientGraphQLMultiError, + GraphQLClientHttpError, + GraphQLClientInvalidResponseError, +) +from .input_types import ( + AppOptionsInput, + CandidateGenerationConfigInput, + DvmsConfigInput, + DvmsRegulatorConfigInput, + FeederConfigInput, + FeederConfigsInput, + FeederLoadAnalysisInput, + FixedTimeInput, + FixedTimeLoadOverrideInput, + FlaForecastConfigInput, + ForecastConfigInput, + GeoJsonOverlayInput, + GetOpenDssModelsFilterInput, + GetOpenDssModelsSortCriteriaInput, + GetPowerFactoryModelsFilterInput, + GetPowerFactoryModelsSortCriteriaInput, + GetPowerFactoryModelTemplatesFilterInput, + GetPowerFactoryModelTemplatesSortCriteriaInput, + GetSincalModelPresetsFilterInput, + GetSincalModelPresetsSortCriteriaInput, + GetSincalModelsFilterInput, + GetSincalModelsSortCriteriaInput, + GetStudiesFilterInput, + GetStudiesSortCriteriaInput, + GqlDistributionTransformerConfigInput, + GqlLoadConfigInput, + GqlScenarioConfigInput, + 
GqlSincalModelForecastSpecInput, + HcEnhancedMetricsConfigInput, + HcExecutorConfigInput, + HcGeneratorConfigInput, + HcInverterControlConfigInput, + HcMeterPlacementConfigInput, + HcMetricsResultsConfigInput, + HcModelConfigInput, + HcNodeLevelResultsConfigInput, + HcRawResultsConfigInput, + HcResultProcessorConfigInput, + HcScenarioConfigsFilterInput, + HcSolveConfigInput, + HcStoredResultsConfigInput, + HcSwitchMeterPlacementConfigInput, + HcWorkPackagesFilterInput, + HcWorkPackagesSortCriteriaInput, + HcWriterConfigInput, + HcWriterOutputConfigInput, + IngestorConfigInput, + IngestorRunsFilterInput, + IngestorRunsSortCriteriaInput, + InterventionConfigInput, + OpenDssCommonConfigInput, + OpenDssModelGenerationSpecInput, + OpenDssModelInput, + OpenDssModelOptionsInput, + OpenDssModulesConfigInput, + PhaseRebalanceProportionsInput, + PowerFactoryModelGenerationSpecInput, + PowerFactoryModelInput, + ProcessedDiffFilterInput, + ProcessedDiffSortCriteriaInput, + ResultSectionInput, + SincalModelGenerationSpecInput, + SincalModelInput, + StateOverlayInput, + StudyInput, + StudyResultInput, + TimePeriodInput, + TimePeriodLoadOverrideInput, + WorkPackageInput, + YearRangeInput, +) + +__all__ = [ + "AppOptionsInput", + "AsyncBaseClient", + "BaseModel", + "CandidateGenerationConfigInput", + "CandidateGenerationType", + "Client", + "ColumnGroup", + "ColumnName", + "ContainerType", + "DaysRequired", + "DiffType", + "DvmsConfigInput", + "DvmsRegulatorConfigInput", + "FeederConfigInput", + "FeederConfigsInput", + "FeederLoadAnalysisInput", + "FixedTimeInput", + "FixedTimeLoadOverrideInput", + "FlaForecastConfigInput", + "ForecastConfigInput", + "GeoJsonOverlayInput", + "GetOpenDssModelsFilterInput", + "GetOpenDssModelsSortCriteriaInput", + "GetPowerFactoryModelTemplatesFilterInput", + "GetPowerFactoryModelTemplatesSortCriteriaInput", + "GetPowerFactoryModelsFilterInput", + "GetPowerFactoryModelsSortCriteriaInput", + "GetSincalModelPresetsFilterInput", + 
"GetSincalModelPresetsSortCriteriaInput", + "GetSincalModelsFilterInput", + "GetSincalModelsSortCriteriaInput", + "GetStudiesFilterInput", + "GetStudiesSortCriteriaInput", + "GqlDistributionTransformerConfigInput", + "GqlLoadConfigInput", + "GqlScenarioConfigInput", + "GqlSincalModelForecastSpecInput", + "GraphQLClientError", + "GraphQLClientGraphQLError", + "GraphQLClientGraphQLMultiError", + "GraphQLClientHttpError", + "GraphQLClientInvalidResponseError", + "HcEnhancedMetricsConfigInput", + "HcExecutorConfigInput", + "HcFeederScenarioAllocationStrategy", + "HcGeneratorConfigInput", + "HcInverterControlConfigInput", + "HcLoadPlacement", + "HcMeterPlacementConfigInput", + "HcMetricsResultsConfigInput", + "HcModelConfigInput", + "HcNodeLevelResultsConfigInput", + "HcRawResultsConfigInput", + "HcResultProcessorConfigInput", + "HcScenarioConfigsFilterInput", + "HcSolveConfigInput", + "HcSolveMode", + "HcStoredResultsConfigInput", + "HcSwitchClass", + "HcSwitchMeterPlacementConfigInput", + "HcWorkPackagesFilterInput", + "HcWorkPackagesSortCriteriaInput", + "HcWriterConfigInput", + "HcWriterOutputConfigInput", + "HcWriterType", + "HostingCapacityFileType", + "IngestorConfigInput", + "IngestorRunState", + "IngestorRunsFilterInput", + "IngestorRunsSortCriteriaInput", + "IngestorRuntimeKind", + "InterventionClass", + "InterventionConfigInput", + "MeasurementZoneType", + "OpenDssCommonConfigInput", + "OpenDssModelGenerationSpecInput", + "OpenDssModelInput", + "OpenDssModelOptionsInput", + "OpenDssModulesConfigInput", + "OpportunitiesNeed", + "OpportunitiesType", + "PhaseRebalanceProportionsInput", + "PowerFactoryModelGenerationSpecInput", + "PowerFactoryModelInput", + "ProcessedDiffFilterInput", + "ProcessedDiffSortCriteriaInput", + "ResultSectionInput", + "SectionType", + "SerializationType", + "SincalFileType", + "SincalModelGenerationSpecInput", + "SincalModelInput", + "SortOrder", + "StateOverlayInput", + "StudyInput", + "StudyResultInput", + "TimePeriodInput", + 
"TimePeriodLoadOverrideInput", + "Upload", + "VariantFileType", + "VariantStatus", + "VariantWorkflowStatus", + "WorkPackageInput", + "WorkPackageState", + "WorkflowStatus", + "YearRangeInput", +] diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py new file mode 100644 index 0000000..48e6914 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -0,0 +1,391 @@ +# Generated by ariadne-codegen + +import asyncio +import enum +import json +from collections.abc import AsyncIterator +from typing import IO, Any, Optional, TypeVar, cast +from uuid import uuid4 + +import httpx +from pydantic import BaseModel +from pydantic_core import to_jsonable_python + +from .base_model import UNSET, Upload +from .exceptions import ( + GraphQLClientError, + GraphQLClientGraphQLMultiError, + GraphQLClientHttpError, + GraphQLClientInvalidMessageFormat, + GraphQLClientInvalidResponseError, +) + +try: + from websockets import ( # type: ignore[import-not-found,unused-ignore] + ClientConnection, + ) + from websockets import ( # type: ignore[import-not-found,unused-ignore] + connect as ws_connect, + ) + from websockets.typing import ( # type: ignore[import-not-found,unused-ignore] + Data, + Origin, + Subprotocol, + ) +except ImportError: + from contextlib import asynccontextmanager + + @asynccontextmanager # type: ignore + async def ws_connect(*args, **kwargs): + raise NotImplementedError("Subscriptions require 'websockets' package.") + yield + + ClientConnection = Any # type: ignore[misc,assignment,unused-ignore] + Data = Any # type: ignore[misc,assignment,unused-ignore] + Origin = Any # type: ignore[misc,assignment,unused-ignore] + + def Subprotocol(*args, **kwargs): # type: ignore # noqa: N802, N803 + raise NotImplementedError("Subscriptions require 'websockets' package.") + + +Self = TypeVar("Self", bound="AsyncBaseClient") + +GRAPHQL_TRANSPORT_WS = 
"graphql-transport-ws" + + +class GraphQLTransportWSMessageType(str, enum.Enum): + CONNECTION_INIT = "connection_init" + CONNECTION_ACK = "connection_ack" + PING = "ping" + PONG = "pong" + SUBSCRIBE = "subscribe" + NEXT = "next" + ERROR = "error" + COMPLETE = "complete" + + +class AsyncBaseClient: + def __init__( + self, + url: str = "", + headers: Optional[dict[str, str]] = None, + http_client: Optional[httpx.AsyncClient] = None, + ws_url: str = "", + ws_headers: Optional[dict[str, Any]] = None, + ws_origin: Optional[str] = None, + ws_connection_init_payload: Optional[dict[str, Any]] = None, + ) -> None: + self.url = url + self.headers = headers + self.http_client = ( + http_client if http_client else httpx.AsyncClient(headers=headers) + ) + + self.ws_url = ws_url + self.ws_headers = ws_headers or {} + self.ws_origin = Origin(ws_origin) if ws_origin else None + self.ws_connection_init_payload = ws_connection_init_payload + + async def __aenter__(self: Self) -> Self: + return self + + async def __aexit__( + self, + exc_type: object, + exc_val: object, + exc_tb: object, + ) -> None: + await self.http_client.aclose() + + async def execute( + self, + query: str, + operation_name: Optional[str] = None, + variables: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> httpx.Response: + processed_variables, files, files_map = self._process_variables(variables) + + if files and files_map: + return await self._execute_multipart( + query=query, + operation_name=operation_name, + variables=processed_variables, + files=files, + files_map=files_map, + **kwargs, + ) + + return await self._execute_json( + query=query, + operation_name=operation_name, + variables=processed_variables, + **kwargs, + ) + + def get_data(self, response: httpx.Response) -> dict[str, Any]: + if not response.is_success: + raise GraphQLClientHttpError( + status_code=response.status_code, response=response + ) + + try: + response_json = response.json() + except ValueError as exc: + raise 
GraphQLClientInvalidResponseError(response=response) from exc + + if (not isinstance(response_json, dict)) or ( + "data" not in response_json and "errors" not in response_json + ): + raise GraphQLClientInvalidResponseError(response=response) + + data = response_json.get("data") + errors = response_json.get("errors") + + if errors: + raise GraphQLClientGraphQLMultiError.from_errors_dicts( + errors_dicts=errors, data=data + ) + + return cast(dict[str, Any], data) + + async def execute_ws( + self, + query: str, + operation_name: Optional[str] = None, + variables: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + headers = self.ws_headers.copy() + headers.update(kwargs.pop("additional_headers", {})) + + merged_kwargs: dict[str, Any] = {"origin": self.ws_origin} + merged_kwargs.update(kwargs) + merged_kwargs["additional_headers"] = headers + + operation_id = str(uuid4()) + async with ws_connect( + self.ws_url, + subprotocols=[Subprotocol(GRAPHQL_TRANSPORT_WS)], + **merged_kwargs, + ) as websocket: + await self._send_connection_init(websocket) + # Wait for connection_ack; some servers (e.g. Hasura) send ping before + # connection_ack, so we loop and handle pings until we get ack. 
+ try: + await asyncio.wait_for( + self._wait_for_connection_ack(websocket), + timeout=5.0, + ) + except asyncio.TimeoutError as exc: + raise GraphQLClientError( + "Connection ack not received within 5 seconds" + ) from exc + await self._send_subscribe( + websocket, + operation_id=operation_id, + query=query, + operation_name=operation_name, + variables=variables, + ) + + async for message in websocket: + data = await self._handle_ws_message(message, websocket) + if data and "connection_ack" not in data: + yield data + + def _process_variables( + self, variables: Optional[dict[str, Any]] + ) -> tuple[ + dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]] + ]: + if not variables: + return {}, {}, {} + + serializable_variables = self._convert_dict_to_json_serializable(variables) + return self._get_files_from_variables(serializable_variables) + + def _convert_dict_to_json_serializable( + self, dict_: dict[str, Any] + ) -> dict[str, Any]: + return { + key: self._convert_value(value) + for key, value in dict_.items() + if value is not UNSET + } + + def _convert_value(self, value: Any) -> Any: + if isinstance(value, BaseModel): + return value.model_dump(by_alias=True, exclude_unset=True) + if isinstance(value, list): + return [self._convert_value(item) for item in value] + return value + + def _get_files_from_variables( + self, variables: dict[str, Any] + ) -> tuple[ + dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]] + ]: + files_map: dict[str, list[str]] = {} + files_list: list[Upload] = [] + + def separate_files(path: str, obj: Any) -> Any: + if isinstance(obj, list): + nulled_list = [] + for index, value in enumerate(obj): + value = separate_files(f"{path}.{index}", value) + nulled_list.append(value) + return nulled_list + + if isinstance(obj, dict): + nulled_dict = {} + for key, value in obj.items(): + value = separate_files(f"{path}.{key}", value) + nulled_dict[key] = value + return nulled_dict + + if 
isinstance(obj, Upload): + if obj in files_list: + file_index = files_list.index(obj) + files_map[str(file_index)].append(path) + else: + file_index = len(files_list) + files_list.append(obj) + files_map[str(file_index)] = [path] + return None + + return obj + + nulled_variables = separate_files("variables", variables) + files: dict[str, tuple[str, IO[bytes], str]] = { + str(i): (file_.filename, cast(IO[bytes], file_.content), file_.content_type) + for i, file_ in enumerate(files_list) + } + return nulled_variables, files, files_map + + async def _execute_multipart( + self, + query: str, + operation_name: Optional[str], + variables: dict[str, Any], + files: dict[str, tuple[str, IO[bytes], str]], + files_map: dict[str, list[str]], + **kwargs: Any, + ) -> httpx.Response: + data = { + "operations": json.dumps( + { + "query": query, + "operationName": operation_name, + "variables": variables, + }, + default=to_jsonable_python, + ), + "map": json.dumps(files_map, default=to_jsonable_python), + } + + return await self.http_client.post( + url=self.url, data=data, files=files, **kwargs + ) + + async def _execute_json( + self, + query: str, + operation_name: Optional[str], + variables: dict[str, Any], + **kwargs: Any, + ) -> httpx.Response: + headers: dict[str, str] = {"Content-type": "application/json"} + headers.update(kwargs.get("headers", {})) + + merged_kwargs: dict[str, Any] = kwargs.copy() + merged_kwargs["headers"] = headers + + return await self.http_client.post( + url=self.url, + content=json.dumps( + { + "query": query, + "operationName": operation_name, + "variables": variables, + }, + default=to_jsonable_python, + ), + **merged_kwargs, + ) + + async def _send_connection_init(self, websocket: ClientConnection) -> None: + payload: dict[str, Any] = { + "type": GraphQLTransportWSMessageType.CONNECTION_INIT.value + } + if self.ws_connection_init_payload: + payload["payload"] = self.ws_connection_init_payload + await websocket.send(json.dumps(payload)) + + async def 
_wait_for_connection_ack(self, websocket: ClientConnection) -> None: + """Read messages until connection_ack; handle ping/pong in between.""" + async for message in websocket: + data = await self._handle_ws_message(message, websocket) + if data is not None and "connection_ack" in data: + return + + async def _send_subscribe( + self, + websocket: ClientConnection, + operation_id: str, + query: str, + operation_name: Optional[str] = None, + variables: Optional[dict[str, Any]] = None, + ) -> None: + payload: dict[str, Any] = { + "id": operation_id, + "type": GraphQLTransportWSMessageType.SUBSCRIBE.value, + "payload": {"query": query, "operationName": operation_name}, + } + if variables: + payload["payload"]["variables"] = self._convert_dict_to_json_serializable( + variables + ) + await websocket.send(json.dumps(payload)) + + async def _handle_ws_message( + self, + message: Data, + websocket: ClientConnection, + expected_type: Optional[GraphQLTransportWSMessageType] = None, + ) -> Optional[dict[str, Any]]: + try: + message_dict = json.loads(message) + except json.JSONDecodeError as exc: + raise GraphQLClientInvalidMessageFormat(message=message) from exc + + type_ = message_dict.get("type") + payload = message_dict.get("payload", {}) + + if not type_ or type_ not in {t.value for t in GraphQLTransportWSMessageType}: + raise GraphQLClientInvalidMessageFormat(message=message) + + if expected_type and expected_type != type_: + raise GraphQLClientInvalidMessageFormat( + f"Invalid message received. 
Expected: {expected_type.value}" + ) + + if type_ == GraphQLTransportWSMessageType.NEXT: + if "data" not in payload: + raise GraphQLClientInvalidMessageFormat(message=message) + return cast(dict[str, Any], payload["data"]) + + if type_ == GraphQLTransportWSMessageType.COMPLETE: + await websocket.close() + elif type_ == GraphQLTransportWSMessageType.PING: + await websocket.send( + json.dumps({"type": GraphQLTransportWSMessageType.PONG.value}) + ) + elif type_ == GraphQLTransportWSMessageType.ERROR: + raise GraphQLClientGraphQLMultiError.from_errors_dicts( + errors_dicts=payload, data=message_dict + ) + elif type_ == GraphQLTransportWSMessageType.CONNECTION_ACK: + return {"connection_ack": True} + + return None diff --git a/src/zepben/eas/lib/generated_graphql_client/base_model.py b/src/zepben/eas/lib/generated_graphql_client/base_model.py new file mode 100644 index 0000000..a93b416 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/base_model.py @@ -0,0 +1,30 @@ +# Generated by ariadne-codegen + +from io import IOBase + +from pydantic import BaseModel as PydanticBaseModel +from pydantic import ConfigDict + + +class UnsetType: + def __bool__(self) -> bool: + return False + + +UNSET = UnsetType() + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + arbitrary_types_allowed=True, + protected_namespaces=(), + ) + + +class Upload: + def __init__(self, filename: str, content: IOBase, content_type: str): + self.filename = filename + self.content = content + self.content_type = content_type diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py new file mode 100644 index 0000000..6433b07 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -0,0 +1,156 @@ +# Generated by ariadne-codegen + +from typing import Any, Optional, Union + +from graphql import ( + ArgumentNode, + 
FieldNode, + InlineFragmentNode, + NamedTypeNode, + NameNode, + SelectionSetNode, + VariableNode, +) + + +class GraphQLArgument: + """ + Represents a GraphQL argument and allows conversion to an AST structure. + """ + + def __init__(self, argument_name: str, argument_value: Any) -> None: + self._name = argument_name + self._value = argument_value + + def to_ast(self) -> ArgumentNode: + """Converts the argument to an ArgumentNode AST object.""" + return ArgumentNode( + name=NameNode(value=self._name), + value=VariableNode(name=NameNode(value=self._value)), + ) + + +class GraphQLField: + """ + Represents a GraphQL field with its name, arguments, subfields, alias, + and inline fragments. + + Attributes: + formatted_variables (dict[str, dict[str, Any]]): The formatted arguments + of the GraphQL field. + """ + + def __init__( + self, field_name: str, arguments: Optional[dict[str, dict[str, Any]]] = None + ) -> None: + self._field_name = field_name + self._variables = arguments or {} + self.formatted_variables: dict[str, dict[str, Any]] = {} + self._subfields: list[GraphQLField] = [] + self._alias: Optional[str] = None + self._inline_fragments: dict[str, tuple[GraphQLField, ...]] = {} + + def alias(self, alias: str) -> "GraphQLField": + """Sets an alias for the GraphQL field and returns the instance.""" + self._alias = alias + return self + + def _build_field_name(self) -> str: + """Builds the field name, including the alias if present.""" + return f"{self._alias}: {self._field_name}" if self._alias else self._field_name + + def _build_selections( + self, idx: int, used_names: set[str] + ) -> list[Union[FieldNode, InlineFragmentNode]]: + """Builds the selection set for the current GraphQL field, + including subfields and inline fragments.""" + # Create selections from subfields + selections: list[Union[FieldNode, InlineFragmentNode]] = [ + subfield.to_ast(idx, used_names) for subfield in self._subfields + ] + + # Add inline fragments + for name, subfields in 
self._inline_fragments.items(): + selections.append( + InlineFragmentNode( + type_condition=NamedTypeNode(name=NameNode(value=name)), + selection_set=SelectionSetNode( + selections=[ + subfield.to_ast(idx, used_names) for subfield in subfields + ] + ), + ) + ) + + return selections + + def _format_variable_name( + self, idx: int, var_name: str, used_names: set[str] + ) -> str: + """Generates a unique variable name by appending an index and, + if necessary, an additional counter to avoid duplicates.""" + base_name = f"{var_name}_{idx}" + unique_name = base_name + counter = 1 + + # Ensure the generated name is unique + while unique_name in used_names: + unique_name = f"{base_name}_{counter}" + counter += 1 + + # Add the unique name to the set of used names + used_names.add(unique_name) + + return unique_name + + def _collect_all_variables(self, idx: int, used_names: set[str]) -> None: + """ + Collects and formats all variables for the current GraphQL field, + ensuring unique names. + """ + self.formatted_variables = {} + + for k, v in self._variables.items(): + unique_name = self._format_variable_name(idx, k, used_names) + self.formatted_variables[unique_name] = { + "name": k, + "type": v["type"], + "value": v["value"], + } + + def to_ast(self, idx: int, used_names: Optional[set[str]] = None) -> FieldNode: + """Converts the current GraphQL field to an AST (Abstract Syntax Tree) node.""" + if used_names is None: + used_names = set() + + self._collect_all_variables(idx, used_names) + + return FieldNode( + name=NameNode(value=self._build_field_name()), + arguments=[ + GraphQLArgument(v["name"], k).to_ast() + for k, v in self.formatted_variables.items() + ], + selection_set=( + SelectionSetNode(selections=self._build_selections(idx, used_names)) + if self._subfields or self._inline_fragments + else None + ), + ) + + def get_formatted_variables(self) -> dict[str, dict[str, Any]]: + """ + Retrieves all formatted variables for the current GraphQL field, + including those 
from subfields and inline fragments. + """ + formatted_variables = self.formatted_variables.copy() + + # Collect variables from subfields + for subfield in self._subfields: + formatted_variables.update(subfield.get_formatted_variables()) + + # Collect variables from inline fragments + for subfields in self._inline_fragments.values(): + for subfield in subfields: + formatted_variables.update(subfield.get_formatted_variables()) + return formatted_variables diff --git a/src/zepben/eas/lib/generated_graphql_client/client.py b/src/zepben/eas/lib/generated_graphql_client/client.py new file mode 100644 index 0000000..4e89027 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/client.py @@ -0,0 +1,109 @@ +# Generated by ariadne-codegen + +from typing import Any + +from graphql import ( + DocumentNode, + NamedTypeNode, + NameNode, + OperationDefinitionNode, + OperationType, + SelectionNode, + SelectionSetNode, + VariableDefinitionNode, + VariableNode, + print_ast, +) + +from .async_base_client import AsyncBaseClient +from .base_operation import GraphQLField + + +def gql(q: str) -> str: + return q + + +class Client(AsyncBaseClient): + async def execute_custom_operation( + self, *fields: GraphQLField, operation_type: OperationType, operation_name: str + ) -> dict[str, Any]: + selections = self._build_selection_set(fields) + combined_variables = self._combine_variables(fields) + variable_definitions = self._build_variable_definitions( + combined_variables["types"] + ) + operation_ast = self._build_operation_ast( + selections, operation_type, operation_name, variable_definitions + ) + response = await self.execute( + print_ast(operation_ast), + variables=combined_variables["values"], + operation_name=operation_name, + ) + return self.get_data(response) + + def _combine_variables( + self, fields: tuple[GraphQLField, ...] 
+ ) -> dict[str, dict[str, Any]]: + variables_types_combined = {} + processed_variables_combined = {} + for field in fields: + formatted_variables = field.get_formatted_variables() + variables_types_combined.update( + {k: v["type"] for k, v in formatted_variables.items()} + ) + processed_variables_combined.update( + {k: v["value"] for k, v in formatted_variables.items()} + ) + return { + "types": variables_types_combined, + "values": processed_variables_combined, + } + + def _build_variable_definitions( + self, variables_types_combined: dict[str, str] + ) -> list[VariableDefinitionNode]: + return [ + VariableDefinitionNode( + variable=VariableNode(name=NameNode(value=var_name)), + type=NamedTypeNode(name=NameNode(value=var_value)), + ) + for var_name, var_value in variables_types_combined.items() + ] + + def _build_operation_ast( + self, + selections: list[SelectionNode], + operation_type: OperationType, + operation_name: str, + variable_definitions: list[VariableDefinitionNode], + ) -> DocumentNode: + return DocumentNode( + definitions=[ + OperationDefinitionNode( + operation=operation_type, + name=NameNode(value=operation_name), + variable_definitions=variable_definitions, + selection_set=SelectionSetNode(selections=selections), + ) + ] + ) + + def _build_selection_set( + self, fields: tuple[GraphQLField, ...] 
+ ) -> list[SelectionNode]: + return [field.to_ast(idx) for idx, field in enumerate(fields)] + + async def query(self, *fields: GraphQLField, operation_name: str) -> dict[str, Any]: + return await self.execute_custom_operation( + *fields, operation_type=OperationType.QUERY, operation_name=operation_name + ) + + async def mutation( + self, *fields: GraphQLField, operation_name: str + ) -> dict[str, Any]: + return await self.execute_custom_operation( + *fields, + operation_type=OperationType.MUTATION, + operation_name=operation_name + ) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py new file mode 100644 index 0000000..e691349 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py @@ -0,0 +1,1854 @@ +# Generated by ariadne-codegen + +from typing import Any, Union + +from .base_operation import GraphQLField +from .custom_typing_fields import ( + AppOptionsGraphQLField, + CoordinateGraphQLField, + CustomerDetailsGraphQLField, + CustomerDetailsResponseGraphQLField, + CustomerListColumnConfigGraphQLField, + DiffResultGraphQLField, + DurationCurveByTerminalGraphQLField, + DurationCurveGraphQLField, + DurationCurvePointGraphQLField, + EquipmentGraphQLField, + FeederLoadAnalysisReportGraphQLField, + FeederLoadAnalysisSpecGraphQLField, + GeoJsonFeatureGraphQLField, + GeoJsonGeometryGraphQLField, + GeoJsonOverlayGraphQLField, + GeoJsonPropertiesGraphQLField, + GqlDistributionTransformerConfigGraphQLField, + GqlLoadConfigGraphQLField, + GqlScenarioConfigGraphQLField, + GqlTxTapRecordGraphQLField, + GqlUserGraphQLField, + GqlUserResponseGraphQLField, + HcCalibrationGraphQLField, + HcModelGraphQLField, + HcScenarioConfigsPageGraphQLField, + HcWorkPackageGraphQLField, + HcWorkPackagePageGraphQLField, + IngestionJobGraphQLField, + IngestionRunGraphQLField, + IngestorRunPageGraphQLField, + JobSourceGraphQLField, + MachineUserGraphQLField, + MetricGraphQLField, + 
NetworkModelGraphQLField, + NetworkModelsGraphQLField, + OpenDssModelGraphQLField, + OpenDssModelPageGraphQLField, + OpportunitiesByYearGraphQLField, + OpportunityGraphQLField, + OpportunityLocationGraphQLField, + PowerFactoryModelGenerationSpecGraphQLField, + PowerFactoryModelGraphQLField, + PowerFactoryModelPageGraphQLField, + PowerFactoryModelTemplateGraphQLField, + PowerFactoryModelTemplatePageGraphQLField, + ProcessedDiffGraphQLField, + ProcessedDiffPageGraphQLField, + RemoveAppOptionResultGraphQLField, + ScenarioConfigurationGraphQLField, + SincalConfigFileGraphQLField, + SincalGlobalInputsConfigGraphQLField, + SincalModelGenerationSpecGraphQLField, + SincalModelGraphQLField, + SincalModelPageGraphQLField, + SincalModelPresetGraphQLField, + SincalModelPresetPageGraphQLField, + StateOverlayGraphQLField, + StudyGraphQLField, + StudyPageGraphQLField, + StudyResultGraphQLField, + UploadUrlResponseGraphQLField, + UserCustomerListColumnConfigGraphQLField, + VariantGraphQLField, + VariantWorkPackageGraphQLField, + WorkPackageModelGroupingsGraphQLField, + WorkPackageModelTotalsGraphQLField, + WorkPackageProgressDetailsGraphQLField, + WorkPackageTreeGraphQLField, +) + + +class AppOptionsFields(GraphQLField): + asset_name_format: "AppOptionsGraphQLField" = AppOptionsGraphQLField( + "assetNameFormat" + ) + pole_string_format: "AppOptionsGraphQLField" = AppOptionsGraphQLField( + "poleStringFormat" + ) + + def fields(self, *subfields: AppOptionsGraphQLField) -> "AppOptionsFields": + """Subfields should come from the AppOptionsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "AppOptionsFields": + self._alias = alias + return self + + +class CoordinateFields(GraphQLField): + latitude: "CoordinateGraphQLField" = CoordinateGraphQLField("latitude") + longitude: "CoordinateGraphQLField" = CoordinateGraphQLField("longitude") + + def fields(self, *subfields: CoordinateGraphQLField) -> "CoordinateFields": + """Subfields should 
come from the CoordinateFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "CoordinateFields": + self._alias = alias + return self + + +class CustomerDetailsFields(GraphQLField): + customer_mrid: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "customerMrid" + ) + customer_type: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "customerType" + ) + distributor: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "distributor" + ) + dlf: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("dlf") + feeder: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("feeder") + first_name: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("firstName") + is_embedded_network: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "isEmbeddedNetwork" + ) + is_energy_feedback: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "isEnergyFeedback" + ) + last_name: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("lastName") + lv_feeder: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("lvFeeder") + meter_number: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "meterNumber" + ) + mobile_number: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "mobileNumber" + ) + move_in_date: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "moveInDate" + ) + nmi: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("nmi") + nmi_class: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("nmiClass") + phone_number: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "phoneNumber" + ) + postal_address: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "postalAddress" + ) + sensitivity_category: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "sensitivityCategory" + ) + service_address: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + 
"serviceAddress" + ) + service_provision_status: "CustomerDetailsGraphQLField" = ( + CustomerDetailsGraphQLField("serviceProvisionStatus") + ) + supply_point_id: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "supplyPointId" + ) + tariff: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("tariff") + tni: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField("tni") + transformer_description: "CustomerDetailsGraphQLField" = ( + CustomerDetailsGraphQLField("transformerDescription") + ) + transformer_id: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "transformerId" + ) + zone_substation: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( + "zoneSubstation" + ) + + def fields( + self, *subfields: CustomerDetailsGraphQLField + ) -> "CustomerDetailsFields": + """Subfields should come from the CustomerDetailsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "CustomerDetailsFields": + self._alias = alias + return self + + +class CustomerDetailsResponseFields(GraphQLField): + @classmethod + def customer_details(cls) -> "CustomerDetailsFields": + return CustomerDetailsFields("customerDetails") + + def fields( + self, + *subfields: Union[CustomerDetailsResponseGraphQLField, "CustomerDetailsFields"] + ) -> "CustomerDetailsResponseFields": + """Subfields should come from the CustomerDetailsResponseFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "CustomerDetailsResponseFields": + self._alias = alias + return self + + +class CustomerListColumnConfigFields(GraphQLField): + column_name: "CustomerListColumnConfigGraphQLField" = ( + CustomerListColumnConfigGraphQLField("columnName") + ) + group: "CustomerListColumnConfigGraphQLField" = ( + CustomerListColumnConfigGraphQLField("group") + ) + + def fields( + self, *subfields: CustomerListColumnConfigGraphQLField + ) -> "CustomerListColumnConfigFields": + """Subfields should 
come from the CustomerListColumnConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "CustomerListColumnConfigFields": + self._alias = alias + return self + + +class DiffResultFields(GraphQLField): + entries: "DiffResultGraphQLField" = DiffResultGraphQLField("entries") + id: "DiffResultGraphQLField" = DiffResultGraphQLField("id") + + def fields(self, *subfields: DiffResultGraphQLField) -> "DiffResultFields": + """Subfields should come from the DiffResultFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "DiffResultFields": + self._alias = alias + return self + + +class DurationCurveFields(GraphQLField): + @classmethod + def points(cls) -> "DurationCurvePointFields": + return DurationCurvePointFields("points") + + def fields( + self, *subfields: Union[DurationCurveGraphQLField, "DurationCurvePointFields"] + ) -> "DurationCurveFields": + """Subfields should come from the DurationCurveFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "DurationCurveFields": + self._alias = alias + return self + + +class DurationCurveByTerminalFields(GraphQLField): + @classmethod + def duration_curve(cls) -> "DurationCurveFields": + return DurationCurveFields("durationCurve") + + terminal_sequence_number: "DurationCurveByTerminalGraphQLField" = ( + DurationCurveByTerminalGraphQLField("terminalSequenceNumber") + ) + + def fields( + self, + *subfields: Union[DurationCurveByTerminalGraphQLField, "DurationCurveFields"] + ) -> "DurationCurveByTerminalFields": + """Subfields should come from the DurationCurveByTerminalFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "DurationCurveByTerminalFields": + self._alias = alias + return self + + +class DurationCurvePointFields(GraphQLField): + conducting_equipment: "DurationCurvePointGraphQLField" = ( + 
DurationCurvePointGraphQLField("conductingEquipment") + ) + feeder: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField("feeder") + kw: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField("kw") + measurement_zone_type: "DurationCurvePointGraphQLField" = ( + DurationCurvePointGraphQLField("measurementZoneType") + ) + percentage_of_time: "DurationCurvePointGraphQLField" = ( + DurationCurvePointGraphQLField("percentageOfTime") + ) + scenario: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField( + "scenario" + ) + terminal_sequence_number: "DurationCurvePointGraphQLField" = ( + DurationCurvePointGraphQLField("terminalSequenceNumber") + ) + timestamp: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField( + "timestamp" + ) + v_base: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField("vBase") + work_package_id: "DurationCurvePointGraphQLField" = DurationCurvePointGraphQLField( + "workPackageId" + ) + + def fields( + self, *subfields: DurationCurvePointGraphQLField + ) -> "DurationCurvePointFields": + """Subfields should come from the DurationCurvePointFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "DurationCurvePointFields": + self._alias = alias + return self + + +class EquipmentFields(GraphQLField): + m_rid: "EquipmentGraphQLField" = EquipmentGraphQLField("mRID") + + @classmethod + def location(cls) -> "CoordinateFields": + return CoordinateFields("location") + + def fields( + self, *subfields: Union[EquipmentGraphQLField, "CoordinateFields"] + ) -> "EquipmentFields": + """Subfields should come from the EquipmentFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "EquipmentFields": + self._alias = alias + return self + + +class FeederLoadAnalysisReportFields(GraphQLField): + completed_at: "FeederLoadAnalysisReportGraphQLField" = ( + FeederLoadAnalysisReportGraphQLField("completedAt") + ) + 
created_at: "FeederLoadAnalysisReportGraphQLField" = ( + FeederLoadAnalysisReportGraphQLField("createdAt") + ) + created_by: "FeederLoadAnalysisReportGraphQLField" = ( + FeederLoadAnalysisReportGraphQLField("createdBy") + ) + errors: "FeederLoadAnalysisReportGraphQLField" = ( + FeederLoadAnalysisReportGraphQLField("errors") + ) + + @classmethod + def generation_spec(cls) -> "FeederLoadAnalysisSpecFields": + return FeederLoadAnalysisSpecFields("generationSpec") + + id: "FeederLoadAnalysisReportGraphQLField" = FeederLoadAnalysisReportGraphQLField( + "id" + ) + name: "FeederLoadAnalysisReportGraphQLField" = FeederLoadAnalysisReportGraphQLField( + "name" + ) + state: "FeederLoadAnalysisReportGraphQLField" = ( + FeederLoadAnalysisReportGraphQLField("state") + ) + + def fields( + self, + *subfields: Union[ + FeederLoadAnalysisReportGraphQLField, "FeederLoadAnalysisSpecFields" + ] + ) -> "FeederLoadAnalysisReportFields": + """Subfields should come from the FeederLoadAnalysisReportFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "FeederLoadAnalysisReportFields": + self._alias = alias + return self + + +class FeederLoadAnalysisSpecFields(GraphQLField): + aggregate_at_feeder_level: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("aggregateAtFeederLevel") + ) + end_date: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( + "endDate" + ) + feeders: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( + "feeders" + ) + fetch_lv_network: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("fetchLvNetwork") + ) + geographical_regions: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("geographicalRegions") + ) + output: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( + "output" + ) + process_coincident_loads: "FeederLoadAnalysisSpecGraphQLField" = ( + 
FeederLoadAnalysisSpecGraphQLField("processCoincidentLoads") + ) + process_feeder_loads: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("processFeederLoads") + ) + produce_basic_report: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("produceBasicReport") + ) + produce_conductor_report: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("produceConductorReport") + ) + start_date: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("startDate") + ) + sub_geographical_regions: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("subGeographicalRegions") + ) + substations: "FeederLoadAnalysisSpecGraphQLField" = ( + FeederLoadAnalysisSpecGraphQLField("substations") + ) + + def fields( + self, *subfields: FeederLoadAnalysisSpecGraphQLField + ) -> "FeederLoadAnalysisSpecFields": + """Subfields should come from the FeederLoadAnalysisSpecFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "FeederLoadAnalysisSpecFields": + self._alias = alias + return self + + +class GeoJsonFeatureFields(GraphQLField): + @classmethod + def geometry(cls) -> "GeoJsonGeometryFields": + return GeoJsonGeometryFields("geometry") + + @classmethod + def properties(cls) -> "GeoJsonPropertiesFields": + return GeoJsonPropertiesFields("properties") + + type_: "GeoJsonFeatureGraphQLField" = GeoJsonFeatureGraphQLField("type") + + def fields( + self, + *subfields: Union[ + GeoJsonFeatureGraphQLField, + "GeoJsonGeometryFields", + "GeoJsonPropertiesFields", + ] + ) -> "GeoJsonFeatureFields": + """Subfields should come from the GeoJsonFeatureFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GeoJsonFeatureFields": + self._alias = alias + return self + + +class GeoJsonGeometryFields(GraphQLField): + @classmethod + def coordinates(cls) -> "CoordinateFields": + return 
CoordinateFields("coordinates") + + type_: "GeoJsonGeometryGraphQLField" = GeoJsonGeometryGraphQLField("type") + + def fields( + self, *subfields: Union[GeoJsonGeometryGraphQLField, "CoordinateFields"] + ) -> "GeoJsonGeometryFields": + """Subfields should come from the GeoJsonGeometryFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GeoJsonGeometryFields": + self._alias = alias + return self + + +class GeoJsonOverlayFields(GraphQLField): + styles: "GeoJsonOverlayGraphQLField" = GeoJsonOverlayGraphQLField("styles") + id: "GeoJsonOverlayGraphQLField" = GeoJsonOverlayGraphQLField("id") + data: "GeoJsonOverlayGraphQLField" = GeoJsonOverlayGraphQLField("data") + source_properties: "GeoJsonOverlayGraphQLField" = GeoJsonOverlayGraphQLField( + "sourceProperties" + ) + + def fields(self, *subfields: GeoJsonOverlayGraphQLField) -> "GeoJsonOverlayFields": + """Subfields should come from the GeoJsonOverlayFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GeoJsonOverlayFields": + self._alias = alias + return self + + +class GeoJsonPropertiesFields(GraphQLField): + detail: "GeoJsonPropertiesGraphQLField" = GeoJsonPropertiesGraphQLField("detail") + properties: "GeoJsonPropertiesGraphQLField" = GeoJsonPropertiesGraphQLField( + "properties" + ) + + def fields( + self, *subfields: GeoJsonPropertiesGraphQLField + ) -> "GeoJsonPropertiesFields": + """Subfields should come from the GeoJsonPropertiesFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GeoJsonPropertiesFields": + self._alias = alias + return self + + +class GqlDistributionTransformerConfigFields(GraphQLField): + r_ground: "GqlDistributionTransformerConfigGraphQLField" = ( + GqlDistributionTransformerConfigGraphQLField("rGround") + ) + x_ground: "GqlDistributionTransformerConfigGraphQLField" = ( + GqlDistributionTransformerConfigGraphQLField("xGround") + ) + + def fields( 
+ self, *subfields: GqlDistributionTransformerConfigGraphQLField + ) -> "GqlDistributionTransformerConfigFields": + """Subfields should come from the GqlDistributionTransformerConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlDistributionTransformerConfigFields": + self._alias = alias + return self + + +class GqlLoadConfigFields(GraphQLField): + spread_max_demand: "GqlLoadConfigGraphQLField" = GqlLoadConfigGraphQLField( + "spreadMaxDemand" + ) + + def fields(self, *subfields: GqlLoadConfigGraphQLField) -> "GqlLoadConfigFields": + """Subfields should come from the GqlLoadConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlLoadConfigFields": + self._alias = alias + return self + + +class GqlScenarioConfigFields(GraphQLField): + bess_upgrade_threshold: "GqlScenarioConfigGraphQLField" = ( + GqlScenarioConfigGraphQLField("bessUpgradeThreshold") + ) + pv_upgrade_threshold: "GqlScenarioConfigGraphQLField" = ( + GqlScenarioConfigGraphQLField("pvUpgradeThreshold") + ) + scenario_id: "GqlScenarioConfigGraphQLField" = GqlScenarioConfigGraphQLField( + "scenarioID" + ) + years: "GqlScenarioConfigGraphQLField" = GqlScenarioConfigGraphQLField("years") + + def fields( + self, *subfields: GqlScenarioConfigGraphQLField + ) -> "GqlScenarioConfigFields": + """Subfields should come from the GqlScenarioConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlScenarioConfigFields": + self._alias = alias + return self + + +class GqlTxTapRecordFields(GraphQLField): + control_enabled: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField( + "controlEnabled" + ) + high_step: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField("highStep") + id: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField("id") + low_step: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField("lowStep") + nominal_tap_num: 
"GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField( + "nominalTapNum" + ) + step_voltage_increment: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField( + "stepVoltageIncrement" + ) + tap_position: "GqlTxTapRecordGraphQLField" = GqlTxTapRecordGraphQLField( + "tapPosition" + ) + + def fields(self, *subfields: GqlTxTapRecordGraphQLField) -> "GqlTxTapRecordFields": + """Subfields should come from the GqlTxTapRecordFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlTxTapRecordFields": + self._alias = alias + return self + + +class GqlUserFields(GraphQLField): + email: "GqlUserGraphQLField" = GqlUserGraphQLField("email") + identity_provider: "GqlUserGraphQLField" = GqlUserGraphQLField("identityProvider") + username: "GqlUserGraphQLField" = GqlUserGraphQLField("username") + id: "GqlUserGraphQLField" = GqlUserGraphQLField("id") + + def fields(self, *subfields: GqlUserGraphQLField) -> "GqlUserFields": + """Subfields should come from the GqlUserFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlUserFields": + self._alias = alias + return self + + +class GqlUserResponseFields(GraphQLField): + email: "GqlUserResponseGraphQLField" = GqlUserResponseGraphQLField("email") + id: "GqlUserResponseGraphQLField" = GqlUserResponseGraphQLField("id") + identity_provider: "GqlUserResponseGraphQLField" = GqlUserResponseGraphQLField( + "identityProvider" + ) + permissions: "GqlUserResponseGraphQLField" = GqlUserResponseGraphQLField( + "permissions" + ) + username: "GqlUserResponseGraphQLField" = GqlUserResponseGraphQLField("username") + + def fields( + self, *subfields: GqlUserResponseGraphQLField + ) -> "GqlUserResponseFields": + """Subfields should come from the GqlUserResponseFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "GqlUserResponseFields": + self._alias = alias + return self + + +class 
HcCalibrationFields(GraphQLField): + calibration_time_local: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField( + "calibrationTimeLocal" + ) + calibration_work_package_config: "HcCalibrationGraphQLField" = ( + HcCalibrationGraphQLField("calibrationWorkPackageConfig") + ) + completed_at: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("completedAt") + feeders: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("feeders") + name: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("name") + run_id: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("runId") + run_info: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("runInfo") + start_at: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("startAt") + status: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("status") + workflow_id: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("workflowId") + id: "HcCalibrationGraphQLField" = HcCalibrationGraphQLField("id") + + def fields(self, *subfields: HcCalibrationGraphQLField) -> "HcCalibrationFields": + """Subfields should come from the HcCalibrationFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "HcCalibrationFields": + self._alias = alias + return self + + +class HcModelFields(GraphQLField): + feeder: "HcModelGraphQLField" = HcModelGraphQLField("feeder") + scenario: "HcModelGraphQLField" = HcModelGraphQLField("scenario") + year: "HcModelGraphQLField" = HcModelGraphQLField("year") + + def fields(self, *subfields: HcModelGraphQLField) -> "HcModelFields": + """Subfields should come from the HcModelFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "HcModelFields": + self._alias = alias + return self + + +class HcScenarioConfigsPageFields(GraphQLField): + offset: "HcScenarioConfigsPageGraphQLField" = HcScenarioConfigsPageGraphQLField( + "offset" + ) + + @classmethod + def scenario_configs(cls) -> 
"ScenarioConfigurationFields": + return ScenarioConfigurationFields("scenarioConfigs") + + total_count: "HcScenarioConfigsPageGraphQLField" = ( + HcScenarioConfigsPageGraphQLField("totalCount") + ) + + def fields( + self, + *subfields: Union[ + HcScenarioConfigsPageGraphQLField, "ScenarioConfigurationFields" + ] + ) -> "HcScenarioConfigsPageFields": + """Subfields should come from the HcScenarioConfigsPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "HcScenarioConfigsPageFields": + self._alias = alias + return self + + +class HcWorkPackageFields(GraphQLField): + completed_at: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("completedAt") + created_at: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("createdAt") + + @classmethod + def created_by(cls) -> "GqlUserFields": + return GqlUserFields("createdBy") + + feeders: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("feeders") + is_deleted: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("isDeleted") + load_type: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("loadType") + name: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("name") + parent_id: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("parentId") + + @classmethod + def progress_details(cls) -> "WorkPackageProgressDetailsFields": + return WorkPackageProgressDetailsFields("progressDetails") + + scenarios: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("scenarios") + status: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("status") + time_period_end: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField( + "timePeriodEnd" + ) + time_period_start: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField( + "timePeriodStart" + ) + years: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("years") + id: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("id") + config: "HcWorkPackageGraphQLField" = 
HcWorkPackageGraphQLField("config") + description: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("description") + updated_at: "HcWorkPackageGraphQLField" = HcWorkPackageGraphQLField("updatedAt") + + def fields( + self, + *subfields: Union[ + HcWorkPackageGraphQLField, + "GqlUserFields", + "WorkPackageProgressDetailsFields", + ] + ) -> "HcWorkPackageFields": + """Subfields should come from the HcWorkPackageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "HcWorkPackageFields": + self._alias = alias + return self + + +class HcWorkPackagePageFields(GraphQLField): + all_users: "HcWorkPackagePageGraphQLField" = HcWorkPackagePageGraphQLField( + "allUsers" + ) + offset: "HcWorkPackagePageGraphQLField" = HcWorkPackagePageGraphQLField("offset") + total_count: "HcWorkPackagePageGraphQLField" = HcWorkPackagePageGraphQLField( + "totalCount" + ) + + @classmethod + def work_packages(cls) -> "HcWorkPackageFields": + return HcWorkPackageFields("workPackages") + + def fields( + self, *subfields: Union[HcWorkPackagePageGraphQLField, "HcWorkPackageFields"] + ) -> "HcWorkPackagePageFields": + """Subfields should come from the HcWorkPackagePageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "HcWorkPackagePageFields": + self._alias = alias + return self + + +class IngestionJobFields(GraphQLField): + application: "IngestionJobGraphQLField" = IngestionJobGraphQLField("application") + application_version: "IngestionJobGraphQLField" = IngestionJobGraphQLField( + "applicationVersion" + ) + id: "IngestionJobGraphQLField" = IngestionJobGraphQLField("id") + source: "IngestionJobGraphQLField" = IngestionJobGraphQLField("source") + start_time: "IngestionJobGraphQLField" = IngestionJobGraphQLField("startTime") + + def fields(self, *subfields: IngestionJobGraphQLField) -> "IngestionJobFields": + """Subfields should come from the IngestionJobFields class""" + 
self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "IngestionJobFields": + self._alias = alias + return self + + +class IngestionRunFields(GraphQLField): + completed_at: "IngestionRunGraphQLField" = IngestionRunGraphQLField("completedAt") + container_runtime_type: "IngestionRunGraphQLField" = IngestionRunGraphQLField( + "containerRuntimeType" + ) + payload: "IngestionRunGraphQLField" = IngestionRunGraphQLField("payload") + started_at: "IngestionRunGraphQLField" = IngestionRunGraphQLField("startedAt") + status: "IngestionRunGraphQLField" = IngestionRunGraphQLField("status") + status_last_updated_at: "IngestionRunGraphQLField" = IngestionRunGraphQLField( + "statusLastUpdatedAt" + ) + token: "IngestionRunGraphQLField" = IngestionRunGraphQLField("token") + id: "IngestionRunGraphQLField" = IngestionRunGraphQLField("id") + + def fields(self, *subfields: IngestionRunGraphQLField) -> "IngestionRunFields": + """Subfields should come from the IngestionRunFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "IngestionRunFields": + self._alias = alias + return self + + +class IngestorRunPageFields(GraphQLField): + @classmethod + def ingestor_runs(cls) -> "IngestionRunFields": + return IngestionRunFields("ingestorRuns") + + offset: "IngestorRunPageGraphQLField" = IngestorRunPageGraphQLField("offset") + total_count: "IngestorRunPageGraphQLField" = IngestorRunPageGraphQLField( + "totalCount" + ) + + def fields( + self, *subfields: Union[IngestorRunPageGraphQLField, "IngestionRunFields"] + ) -> "IngestorRunPageFields": + """Subfields should come from the IngestorRunPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "IngestorRunPageFields": + self._alias = alias + return self + + +class JobSourceFields(GraphQLField): + file_hash: "JobSourceGraphQLField" = JobSourceGraphQLField("fileHash") + name: "JobSourceGraphQLField" = 
JobSourceGraphQLField("name") + timestamp: "JobSourceGraphQLField" = JobSourceGraphQLField("timestamp") + + def fields(self, *subfields: JobSourceGraphQLField) -> "JobSourceFields": + """Subfields should come from the JobSourceFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "JobSourceFields": + self._alias = alias + return self + + +class MachineUserFields(GraphQLField): + display_name: "MachineUserGraphQLField" = MachineUserGraphQLField("displayName") + username: "MachineUserGraphQLField" = MachineUserGraphQLField("username") + + def fields(self, *subfields: MachineUserGraphQLField) -> "MachineUserFields": + """Subfields should come from the MachineUserFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "MachineUserFields": + self._alias = alias + return self + + +class MetricFields(GraphQLField): + name: "MetricGraphQLField" = MetricGraphQLField("name") + value: "MetricGraphQLField" = MetricGraphQLField("value") + + def fields(self, *subfields: MetricGraphQLField) -> "MetricFields": + """Subfields should come from the MetricFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "MetricFields": + self._alias = alias + return self + + +class NetworkModelFields(GraphQLField): + database_name: "NetworkModelGraphQLField" = NetworkModelGraphQLField("databaseName") + source_data_date: "NetworkModelGraphQLField" = NetworkModelGraphQLField( + "sourceDataDate" + ) + + def fields(self, *subfields: NetworkModelGraphQLField) -> "NetworkModelFields": + """Subfields should come from the NetworkModelFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "NetworkModelFields": + self._alias = alias + return self + + +class NetworkModelsFields(GraphQLField): + @classmethod + def available_network_models(cls) -> "NetworkModelFields": + return NetworkModelFields("availableNetworkModels") + + 
currently_loaded_network_model: "NetworkModelsGraphQLField" = ( + NetworkModelsGraphQLField("currentlyLoadedNetworkModel") + ) + network_date_locked: "NetworkModelsGraphQLField" = NetworkModelsGraphQLField( + "networkDateLocked" + ) + + def fields( + self, *subfields: Union[NetworkModelsGraphQLField, "NetworkModelFields"] + ) -> "NetworkModelsFields": + """Subfields should come from the NetworkModelsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "NetworkModelsFields": + self._alias = alias + return self + + +class OpenDssModelFields(GraphQLField): + created_at: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("createdAt") + created_by: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("createdBy") + download_url: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("downloadUrl") + errors: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("errors") + generation_spec: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField( + "generationSpec" + ) + id: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("id") + is_public: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("isPublic") + name: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("name") + state: "OpenDssModelGraphQLField" = OpenDssModelGraphQLField("state") + + def fields(self, *subfields: OpenDssModelGraphQLField) -> "OpenDssModelFields": + """Subfields should come from the OpenDssModelFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "OpenDssModelFields": + self._alias = alias + return self + + +class OpenDssModelPageFields(GraphQLField): + @classmethod + def models(cls) -> "OpenDssModelFields": + return OpenDssModelFields("models") + + offset: "OpenDssModelPageGraphQLField" = OpenDssModelPageGraphQLField("offset") + total_count: "OpenDssModelPageGraphQLField" = OpenDssModelPageGraphQLField( + "totalCount" + ) + + def fields( + self, *subfields: 
Union[OpenDssModelPageGraphQLField, "OpenDssModelFields"] + ) -> "OpenDssModelPageFields": + """Subfields should come from the OpenDssModelPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "OpenDssModelPageFields": + self._alias = alias + return self + + +class OpportunitiesByYearFields(GraphQLField): + @classmethod + def available_opportunities(cls) -> "OpportunityFields": + return OpportunityFields("availableOpportunities") + + year: "OpportunitiesByYearGraphQLField" = OpportunitiesByYearGraphQLField("year") + + def fields( + self, *subfields: Union[OpportunitiesByYearGraphQLField, "OpportunityFields"] + ) -> "OpportunitiesByYearFields": + """Subfields should come from the OpportunitiesByYearFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "OpportunitiesByYearFields": + self._alias = alias + return self + + +class OpportunityFields(GraphQLField): + annual_deferral_value: "OpportunityGraphQLField" = OpportunityGraphQLField( + "annualDeferralValue" + ) + + @classmethod + def conducting_equipment(cls) -> "EquipmentFields": + return EquipmentFields("conductingEquipment") + + connection_voltage_level: "OpportunityGraphQLField" = OpportunityGraphQLField( + "connectionVoltageLevel" + ) + constraint_primary_driver: "OpportunityGraphQLField" = OpportunityGraphQLField( + "constraintPrimaryDriver" + ) + days_required: "OpportunityGraphQLField" = OpportunityGraphQLField("daysRequired") + downstream_customers: "OpportunityGraphQLField" = OpportunityGraphQLField( + "downstreamCustomers" + ) + est_annual_hours: "OpportunityGraphQLField" = OpportunityGraphQLField( + "estAnnualHours" + ) + est_duration_per_event: "OpportunityGraphQLField" = OpportunityGraphQLField( + "estDurationPerEvent" + ) + est_number_of_events: "OpportunityGraphQLField" = OpportunityGraphQLField( + "estNumberOfEvents" + ) + id: "OpportunityGraphQLField" = OpportunityGraphQLField("id") + min_capacity: 
"OpportunityGraphQLField" = OpportunityGraphQLField("minCapacity") + need_direction: "OpportunityGraphQLField" = OpportunityGraphQLField("needDirection") + peak_demand: "OpportunityGraphQLField" = OpportunityGraphQLField("peakDemand") + time_required: "OpportunityGraphQLField" = OpportunityGraphQLField("timeRequired") + title: "OpportunityGraphQLField" = OpportunityGraphQLField("title") + year: "OpportunityGraphQLField" = OpportunityGraphQLField("year") + + @classmethod + def polygon(cls) -> "GeoJsonFeatureFields": + return GeoJsonFeatureFields("polygon") + + def fields( + self, + *subfields: Union[ + OpportunityGraphQLField, "EquipmentFields", "GeoJsonFeatureFields" + ] + ) -> "OpportunityFields": + """Subfields should come from the OpportunityFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "OpportunityFields": + self._alias = alias + return self + + +class OpportunityLocationFields(GraphQLField): + @classmethod + def coordinates(cls) -> "CoordinateFields": + return CoordinateFields("coordinates") + + m_rid: "OpportunityLocationGraphQLField" = OpportunityLocationGraphQLField("mRID") + + def fields( + self, *subfields: Union[OpportunityLocationGraphQLField, "CoordinateFields"] + ) -> "OpportunityLocationFields": + """Subfields should come from the OpportunityLocationFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "OpportunityLocationFields": + self._alias = alias + return self + + +class PowerFactoryModelFields(GraphQLField): + created_at: "PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField( + "createdAt" + ) + errors: "PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField("errors") + + @classmethod + def generation_spec(cls) -> "PowerFactoryModelGenerationSpecFields": + return PowerFactoryModelGenerationSpecFields("generationSpec") + + id: "PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField("id") + is_public: 
"PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField( + "isPublic" + ) + name: "PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField("name") + state: "PowerFactoryModelGraphQLField" = PowerFactoryModelGraphQLField("state") + + def fields( + self, + *subfields: Union[ + PowerFactoryModelGraphQLField, "PowerFactoryModelGenerationSpecFields" + ] + ) -> "PowerFactoryModelFields": + """Subfields should come from the PowerFactoryModelFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "PowerFactoryModelFields": + self._alias = alias + return self + + +class PowerFactoryModelGenerationSpecFields(GraphQLField): + @classmethod + def distribution_transformer_config( + cls, + ) -> "GqlDistributionTransformerConfigFields": + return GqlDistributionTransformerConfigFields("distributionTransformerConfig") + + equipment_container_mrids: "PowerFactoryModelGenerationSpecGraphQLField" = ( + PowerFactoryModelGenerationSpecGraphQLField("equipmentContainerMrids") + ) + + @classmethod + def load_config(cls) -> "GqlLoadConfigFields": + return GqlLoadConfigFields("loadConfig") + + @classmethod + def scenario_config(cls) -> "GqlScenarioConfigFields": + return GqlScenarioConfigFields("scenarioConfig") + + def fields( + self, + *subfields: Union[ + PowerFactoryModelGenerationSpecGraphQLField, + "GqlDistributionTransformerConfigFields", + "GqlLoadConfigFields", + "GqlScenarioConfigFields", + ] + ) -> "PowerFactoryModelGenerationSpecFields": + """Subfields should come from the PowerFactoryModelGenerationSpecFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "PowerFactoryModelGenerationSpecFields": + self._alias = alias + return self + + +class PowerFactoryModelPageFields(GraphQLField): + offset: "PowerFactoryModelPageGraphQLField" = PowerFactoryModelPageGraphQLField( + "offset" + ) + + @classmethod + def power_factory_models(cls) -> "PowerFactoryModelFields": + return 
PowerFactoryModelFields("powerFactoryModels") + + total_count: "PowerFactoryModelPageGraphQLField" = ( + PowerFactoryModelPageGraphQLField("totalCount") + ) + + def fields( + self, + *subfields: Union[PowerFactoryModelPageGraphQLField, "PowerFactoryModelFields"] + ) -> "PowerFactoryModelPageFields": + """Subfields should come from the PowerFactoryModelPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "PowerFactoryModelPageFields": + self._alias = alias + return self + + +class PowerFactoryModelTemplateFields(GraphQLField): + created_at: "PowerFactoryModelTemplateGraphQLField" = ( + PowerFactoryModelTemplateGraphQLField("createdAt") + ) + + @classmethod + def generation_spec(cls) -> "PowerFactoryModelGenerationSpecFields": + return PowerFactoryModelGenerationSpecFields("generationSpec") + + id: "PowerFactoryModelTemplateGraphQLField" = PowerFactoryModelTemplateGraphQLField( + "id" + ) + is_public: "PowerFactoryModelTemplateGraphQLField" = ( + PowerFactoryModelTemplateGraphQLField("isPublic") + ) + name: "PowerFactoryModelTemplateGraphQLField" = ( + PowerFactoryModelTemplateGraphQLField("name") + ) + + def fields( + self, + *subfields: Union[ + PowerFactoryModelTemplateGraphQLField, + "PowerFactoryModelGenerationSpecFields", + ] + ) -> "PowerFactoryModelTemplateFields": + """Subfields should come from the PowerFactoryModelTemplateFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "PowerFactoryModelTemplateFields": + self._alias = alias + return self + + +class PowerFactoryModelTemplatePageFields(GraphQLField): + offset: "PowerFactoryModelTemplatePageGraphQLField" = ( + PowerFactoryModelTemplatePageGraphQLField("offset") + ) + + @classmethod + def templates(cls) -> "PowerFactoryModelTemplateFields": + return PowerFactoryModelTemplateFields("templates") + + total_count: "PowerFactoryModelTemplatePageGraphQLField" = ( + 
PowerFactoryModelTemplatePageGraphQLField("totalCount") + ) + + def fields( + self, + *subfields: Union[ + PowerFactoryModelTemplatePageGraphQLField, "PowerFactoryModelTemplateFields" + ] + ) -> "PowerFactoryModelTemplatePageFields": + """Subfields should come from the PowerFactoryModelTemplatePageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "PowerFactoryModelTemplatePageFields": + self._alias = alias + return self + + +class ProcessedDiffFields(GraphQLField): + description: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("description") + diff_id: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("diffId") + feeder: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("feeder") + name: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("name") + scenario: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("scenario") + type_: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("type") + + @classmethod + def w_p_id_1(cls) -> "HcWorkPackageFields": + return HcWorkPackageFields("wPId1") + + @classmethod + def w_p_id_2(cls) -> "HcWorkPackageFields": + return HcWorkPackageFields("wPId2") + + year: "ProcessedDiffGraphQLField" = ProcessedDiffGraphQLField("year") + + def fields( + self, *subfields: Union[ProcessedDiffGraphQLField, "HcWorkPackageFields"] + ) -> "ProcessedDiffFields": + """Subfields should come from the ProcessedDiffFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "ProcessedDiffFields": + self._alias = alias + return self + + +class ProcessedDiffPageFields(GraphQLField): + offset: "ProcessedDiffPageGraphQLField" = ProcessedDiffPageGraphQLField("offset") + + @classmethod + def processed_diff(cls) -> "ProcessedDiffFields": + return ProcessedDiffFields("processedDiff") + + total_count: "ProcessedDiffPageGraphQLField" = ProcessedDiffPageGraphQLField( + "totalCount" + ) + + def fields( + self, *subfields: 
Union[ProcessedDiffPageGraphQLField, "ProcessedDiffFields"] + ) -> "ProcessedDiffPageFields": + """Subfields should come from the ProcessedDiffPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "ProcessedDiffPageFields": + self._alias = alias + return self + + +class RemoveAppOptionResultFields(GraphQLField): + name: "RemoveAppOptionResultGraphQLField" = RemoveAppOptionResultGraphQLField( + "name" + ) + removed: "RemoveAppOptionResultGraphQLField" = RemoveAppOptionResultGraphQLField( + "removed" + ) + + def fields( + self, *subfields: RemoveAppOptionResultGraphQLField + ) -> "RemoveAppOptionResultFields": + """Subfields should come from the RemoveAppOptionResultFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "RemoveAppOptionResultFields": + self._alias = alias + return self + + +class ScenarioConfigurationFields(GraphQLField): + bess_allocation_id: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("bessAllocationId") + ) + bess_forecast_level: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("bessForecastLevel") + ) + bess_forecasts_scenario: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("bessForecastsScenario") + ) + demand_forecast_level: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("demandForecastLevel") + ) + demand_forecast_poe: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("demandForecastPoe") + ) + demand_forecasts_scenario: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("demandForecastsScenario") + ) + ev_allocation_id: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("evAllocationId") + ) + ev_forecast_level: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("evForecastLevel") + ) + ev_forecasts_scenario: 
"ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("evForecastsScenario") + ) + pv_allocation_id: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("pvAllocationId") + ) + pv_forecast_level: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("pvForecastLevel") + ) + pv_forecasts_scenario: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("pvForecastsScenario") + ) + scenario_name: "ScenarioConfigurationGraphQLField" = ( + ScenarioConfigurationGraphQLField("scenarioName") + ) + id: "ScenarioConfigurationGraphQLField" = ScenarioConfigurationGraphQLField("id") + + def fields( + self, *subfields: ScenarioConfigurationGraphQLField + ) -> "ScenarioConfigurationFields": + """Subfields should come from the ScenarioConfigurationFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "ScenarioConfigurationFields": + self._alias = alias + return self + + +class SincalConfigFileFields(GraphQLField): + file_type: "SincalConfigFileGraphQLField" = SincalConfigFileGraphQLField("fileType") + original_filename: "SincalConfigFileGraphQLField" = SincalConfigFileGraphQLField( + "originalFilename" + ) + raw_filename: "SincalConfigFileGraphQLField" = SincalConfigFileGraphQLField( + "rawFilename" + ) + standard_name: "SincalConfigFileGraphQLField" = SincalConfigFileGraphQLField( + "standardName" + ) + + def fields( + self, *subfields: SincalConfigFileGraphQLField + ) -> "SincalConfigFileFields": + """Subfields should come from the SincalConfigFileFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalConfigFileFields": + self._alias = alias + return self + + +class SincalGlobalInputsConfigFields(GraphQLField): + @classmethod + def backend_config(cls) -> "SincalConfigFileFields": + return SincalConfigFileFields("backendConfig") + + @classmethod + def frontend_config(cls) -> 
"SincalConfigFileFields": + return SincalConfigFileFields("frontendConfig") + + @classmethod + def in_feeder_mapping_database(cls) -> "SincalConfigFileFields": + return SincalConfigFileFields("inFeederMappingDatabase") + + @classmethod + def local_standard_database(cls) -> "SincalConfigFileFields": + return SincalConfigFileFields("localStandardDatabase") + + @classmethod + def protection_standard_database(cls) -> "SincalConfigFileFields": + return SincalConfigFileFields("protectionStandardDatabase") + + @classmethod + def template(cls) -> "SincalConfigFileFields": + return SincalConfigFileFields("template") + + def fields( + self, + *subfields: Union[ + SincalGlobalInputsConfigGraphQLField, "SincalConfigFileFields" + ] + ) -> "SincalGlobalInputsConfigFields": + """Subfields should come from the SincalGlobalInputsConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalGlobalInputsConfigFields": + self._alias = alias + return self + + +class SincalModelFields(GraphQLField): + created_at: "SincalModelGraphQLField" = SincalModelGraphQLField("createdAt") + created_by: "SincalModelGraphQLField" = SincalModelGraphQLField("createdBy") + errors: "SincalModelGraphQLField" = SincalModelGraphQLField("errors") + + @classmethod + def generation_spec(cls) -> "SincalModelGenerationSpecFields": + return SincalModelGenerationSpecFields("generationSpec") + + id: "SincalModelGraphQLField" = SincalModelGraphQLField("id") + is_public: "SincalModelGraphQLField" = SincalModelGraphQLField("isPublic") + name: "SincalModelGraphQLField" = SincalModelGraphQLField("name") + state: "SincalModelGraphQLField" = SincalModelGraphQLField("state") + + def fields( + self, + *subfields: Union[SincalModelGraphQLField, "SincalModelGenerationSpecFields"] + ) -> "SincalModelFields": + """Subfields should come from the SincalModelFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalModelFields": + 
self._alias = alias + return self + + +class SincalModelGenerationSpecFields(GraphQLField): + config: "SincalModelGenerationSpecGraphQLField" = ( + SincalModelGenerationSpecGraphQLField("config") + ) + equipment_container_mrids: "SincalModelGenerationSpecGraphQLField" = ( + SincalModelGenerationSpecGraphQLField("equipmentContainerMrids") + ) + + def fields( + self, *subfields: SincalModelGenerationSpecGraphQLField + ) -> "SincalModelGenerationSpecFields": + """Subfields should come from the SincalModelGenerationSpecFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalModelGenerationSpecFields": + self._alias = alias + return self + + +class SincalModelPageFields(GraphQLField): + offset: "SincalModelPageGraphQLField" = SincalModelPageGraphQLField("offset") + + @classmethod + def sincal_models(cls) -> "SincalModelFields": + return SincalModelFields("sincalModels") + + total_count: "SincalModelPageGraphQLField" = SincalModelPageGraphQLField( + "totalCount" + ) + + def fields( + self, *subfields: Union[SincalModelPageGraphQLField, "SincalModelFields"] + ) -> "SincalModelPageFields": + """Subfields should come from the SincalModelPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalModelPageFields": + self._alias = alias + return self + + +class SincalModelPresetFields(GraphQLField): + created_at: "SincalModelPresetGraphQLField" = SincalModelPresetGraphQLField( + "createdAt" + ) + created_by: "SincalModelPresetGraphQLField" = SincalModelPresetGraphQLField( + "createdBy" + ) + + @classmethod + def generation_spec(cls) -> "SincalModelGenerationSpecFields": + return SincalModelGenerationSpecFields("generationSpec") + + id: "SincalModelPresetGraphQLField" = SincalModelPresetGraphQLField("id") + is_public: "SincalModelPresetGraphQLField" = SincalModelPresetGraphQLField( + "isPublic" + ) + name: "SincalModelPresetGraphQLField" = 
SincalModelPresetGraphQLField("name") + + def fields( + self, + *subfields: Union[ + SincalModelPresetGraphQLField, "SincalModelGenerationSpecFields" + ] + ) -> "SincalModelPresetFields": + """Subfields should come from the SincalModelPresetFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalModelPresetFields": + self._alias = alias + return self + + +class SincalModelPresetPageFields(GraphQLField): + offset: "SincalModelPresetPageGraphQLField" = SincalModelPresetPageGraphQLField( + "offset" + ) + + @classmethod + def presets(cls) -> "SincalModelPresetFields": + return SincalModelPresetFields("presets") + + total_count: "SincalModelPresetPageGraphQLField" = ( + SincalModelPresetPageGraphQLField("totalCount") + ) + + def fields( + self, + *subfields: Union[SincalModelPresetPageGraphQLField, "SincalModelPresetFields"] + ) -> "SincalModelPresetPageFields": + """Subfields should come from the SincalModelPresetPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "SincalModelPresetPageFields": + self._alias = alias + return self + + +class StateOverlayFields(GraphQLField): + styles: "StateOverlayGraphQLField" = StateOverlayGraphQLField("styles") + id: "StateOverlayGraphQLField" = StateOverlayGraphQLField("id") + data: "StateOverlayGraphQLField" = StateOverlayGraphQLField("data") + + def fields(self, *subfields: StateOverlayGraphQLField) -> "StateOverlayFields": + """Subfields should come from the StateOverlayFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "StateOverlayFields": + self._alias = alias + return self + + +class StudyFields(GraphQLField): + created_at: "StudyGraphQLField" = StudyGraphQLField("createdAt") + name: "StudyGraphQLField" = StudyGraphQLField("name") + tags: "StudyGraphQLField" = StudyGraphQLField("tags") + id: "StudyGraphQLField" = StudyGraphQLField("id") + + @classmethod + def 
created_by(cls) -> "GqlUserFields": + return GqlUserFields("createdBy") + + description: "StudyGraphQLField" = StudyGraphQLField("description") + + @classmethod + def results(cls) -> "StudyResultFields": + return StudyResultFields("results") + + styles: "StudyGraphQLField" = StudyGraphQLField("styles") + + def fields( + self, *subfields: Union[StudyGraphQLField, "GqlUserFields", "StudyResultFields"] + ) -> "StudyFields": + """Subfields should come from the StudyFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "StudyFields": + self._alias = alias + return self + + +class StudyPageFields(GraphQLField): + all_tags: "StudyPageGraphQLField" = StudyPageGraphQLField("allTags") + all_users: "StudyPageGraphQLField" = StudyPageGraphQLField("allUsers") + offset: "StudyPageGraphQLField" = StudyPageGraphQLField("offset") + + @classmethod + def studies(cls) -> "StudyFields": + return StudyFields("studies") + + total_count: "StudyPageGraphQLField" = StudyPageGraphQLField("totalCount") + + def fields( + self, *subfields: Union[StudyPageGraphQLField, "StudyFields"] + ) -> "StudyPageFields": + """Subfields should come from the StudyPageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "StudyPageFields": + self._alias = alias + return self + + +class StudyResultFields(GraphQLField): + name: "StudyResultGraphQLField" = StudyResultGraphQLField("name") + id: "StudyResultGraphQLField" = StudyResultGraphQLField("id") + + @classmethod + def geo_json_overlay(cls) -> "GeoJsonOverlayFields": + return GeoJsonOverlayFields("geoJsonOverlay") + + @classmethod + def sections(cls) -> "ResultSectionInterface": + return ResultSectionInterface("sections") + + @classmethod + def state_overlay(cls) -> "StateOverlayFields": + return StateOverlayFields("stateOverlay") + + def fields( + self, + *subfields: Union[ + StudyResultGraphQLField, + "GeoJsonOverlayFields", + "ResultSectionInterface", + 
"StateOverlayFields", + ] + ) -> "StudyResultFields": + """Subfields should come from the StudyResultFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "StudyResultFields": + self._alias = alias + return self + + +class UploadUrlResponseFields(GraphQLField): + file_path: "UploadUrlResponseGraphQLField" = UploadUrlResponseGraphQLField( + "filePath" + ) + upload_url: "UploadUrlResponseGraphQLField" = UploadUrlResponseGraphQLField( + "uploadUrl" + ) + + def fields( + self, *subfields: UploadUrlResponseGraphQLField + ) -> "UploadUrlResponseFields": + """Subfields should come from the UploadUrlResponseFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "UploadUrlResponseFields": + self._alias = alias + return self + + +class UserCustomerListColumnConfigFields(GraphQLField): + @classmethod + def columns(cls) -> "CustomerListColumnConfigFields": + return CustomerListColumnConfigFields("columns") + + def fields( + self, + *subfields: Union[ + UserCustomerListColumnConfigGraphQLField, "CustomerListColumnConfigFields" + ] + ) -> "UserCustomerListColumnConfigFields": + """Subfields should come from the UserCustomerListColumnConfigFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "UserCustomerListColumnConfigFields": + self._alias = alias + return self + + +class VariantFields(GraphQLField): + conducting_equipment_count: "VariantGraphQLField" = VariantGraphQLField( + "conductingEquipmentCount" + ) + m_rid: "VariantGraphQLField" = VariantGraphQLField("mRID") + name: "VariantGraphQLField" = VariantGraphQLField("name") + network_database_location: "VariantGraphQLField" = VariantGraphQLField( + "networkDatabaseLocation" + ) + new_parent: "VariantGraphQLField" = VariantGraphQLField("newParent") + new_variant: "VariantGraphQLField" = VariantGraphQLField("newVariant") + parent: "VariantGraphQLField" = VariantGraphQLField("parent") + 
parent_mrid: "VariantGraphQLField" = VariantGraphQLField("parentMRID") + status: "VariantGraphQLField" = VariantGraphQLField("status") + + def fields(self, *subfields: VariantGraphQLField) -> "VariantFields": + """Subfields should come from the VariantFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "VariantFields": + self._alias = alias + return self + + +class VariantWorkPackageFields(GraphQLField): + status: "VariantWorkPackageGraphQLField" = VariantWorkPackageGraphQLField("status") + + @classmethod + def variants(cls) -> "VariantFields": + return VariantFields("variants") + + def fields( + self, *subfields: Union[VariantWorkPackageGraphQLField, "VariantFields"] + ) -> "VariantWorkPackageFields": + """Subfields should come from the VariantWorkPackageFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "VariantWorkPackageFields": + self._alias = alias + return self + + +class WorkPackageModelGroupingsFields(GraphQLField): + @classmethod + def cancelled(cls) -> "HcModelFields": + return HcModelFields("cancelled") + + @classmethod + def execution(cls) -> "HcModelFields": + return HcModelFields("execution") + + @classmethod + def failed(cls) -> "HcModelFields": + return HcModelFields("failed") + + @classmethod + def generation(cls) -> "HcModelFields": + return HcModelFields("generation") + + @classmethod + def pending(cls) -> "HcModelFields": + return HcModelFields("pending") + + @classmethod + def result_processing(cls) -> "HcModelFields": + return HcModelFields("resultProcessing") + + @classmethod + def succeeded(cls) -> "HcModelFields": + return HcModelFields("succeeded") + + @classmethod + def timed_out(cls) -> "HcModelFields": + return HcModelFields("timedOut") + + def fields( + self, *subfields: Union[WorkPackageModelGroupingsGraphQLField, "HcModelFields"] + ) -> "WorkPackageModelGroupingsFields": + """Subfields should come from the 
WorkPackageModelGroupingsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "WorkPackageModelGroupingsFields": + self._alias = alias + return self + + +class WorkPackageModelTotalsFields(GraphQLField): + total_cancelled: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalCancelled") + ) + total_failed: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalFailed") + ) + total_models: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalModels") + ) + total_pending: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalPending") + ) + total_running: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalRunning") + ) + total_succeeded: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalSucceeded") + ) + total_timed_out: "WorkPackageModelTotalsGraphQLField" = ( + WorkPackageModelTotalsGraphQLField("totalTimedOut") + ) + + def fields( + self, *subfields: WorkPackageModelTotalsGraphQLField + ) -> "WorkPackageModelTotalsFields": + """Subfields should come from the WorkPackageModelTotalsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "WorkPackageModelTotalsFields": + self._alias = alias + return self + + +class WorkPackageProgressDetailsFields(GraphQLField): + @classmethod + def model_groupings(cls) -> "WorkPackageModelGroupingsFields": + return WorkPackageModelGroupingsFields("modelGroupings") + + @classmethod + def model_totals(cls) -> "WorkPackageModelTotalsFields": + return WorkPackageModelTotalsFields("modelTotals") + + def fields( + self, + *subfields: Union[ + WorkPackageProgressDetailsGraphQLField, + "WorkPackageModelGroupingsFields", + "WorkPackageModelTotalsFields", + ] + ) -> "WorkPackageProgressDetailsFields": + """Subfields should come from the 
WorkPackageProgressDetailsFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "WorkPackageProgressDetailsFields": + self._alias = alias + return self + + +class WorkPackageTreeFields(GraphQLField): + @classmethod + def ancestors(cls) -> "HcWorkPackageFields": + return HcWorkPackageFields("ancestors") + + @classmethod + def children(cls) -> "HcWorkPackageFields": + return HcWorkPackageFields("children") + + def fields( + self, *subfields: Union[WorkPackageTreeGraphQLField, "HcWorkPackageFields"] + ) -> "WorkPackageTreeFields": + """Subfields should come from the WorkPackageTreeFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "WorkPackageTreeFields": + self._alias = alias + return self diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py new file mode 100644 index 0000000..54985d6 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -0,0 +1,491 @@ +# Generated by ariadne-codegen + +from typing import Any, Optional + +from . 
import SincalFileType, VariantFileType +from .custom_fields import ( + DiffResultFields, + RemoveAppOptionResultFields, + UserCustomerListColumnConfigFields, +) +from .custom_typing_fields import GraphQLField +from .input_types import ( + AppOptionsInput, + FeederLoadAnalysisInput, + HcGeneratorConfigInput, + IngestorConfigInput, + OpenDssModelInput, + PowerFactoryModelGenerationSpecInput, + PowerFactoryModelInput, + SincalModelGenerationSpecInput, + SincalModelInput, + StudyInput, + WorkPackageInput, +) + + +class Mutation: + @classmethod + def add_studies(cls, studies: list[StudyInput]) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "studies": {"type": "[StudyInput!]!", "value": studies} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="addStudies", arguments=cleared_arguments) + + @classmethod + def delete_studies(cls, ids: list[str]) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="deleteStudies", arguments=cleared_arguments) + + @classmethod + def create_power_factory_model(cls, input: PowerFactoryModelInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "PowerFactoryModelInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="createPowerFactoryModel", arguments=cleared_arguments + ) + + @classmethod + def create_power_factory_model_template( + cls, input: PowerFactoryModelInput + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "PowerFactoryModelInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + 
return GraphQLField( + field_name="createPowerFactoryModelTemplate", arguments=cleared_arguments + ) + + @classmethod + def delete_power_factory_model(cls, model_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="deletePowerFactoryModel", arguments=cleared_arguments + ) + + @classmethod + def delete_power_factory_model_template(cls, template_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "templateId": {"type": "ID!", "value": template_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="deletePowerFactoryModelTemplate", arguments=cleared_arguments + ) + + @classmethod + def update_power_factory_model_template( + cls, template_id: str, generation_spec: PowerFactoryModelGenerationSpecInput + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "templateId": {"type": "ID!", "value": template_id}, + "generationSpec": { + "type": "PowerFactoryModelGenerationSpecInput!", + "value": generation_spec, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="updatePowerFactoryModelTemplate", arguments=cleared_arguments + ) + + @classmethod + def cancel_work_package(cls, work_package_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "workPackageId": {"type": "ID!", "value": work_package_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="cancelWorkPackage", arguments=cleared_arguments) + + @classmethod + def delete_work_package(cls, work_package_ids: list[str]) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + 
"workPackageIds": {"type": "[String!]!", "value": work_package_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="deleteWorkPackage", arguments=cleared_arguments) + + @classmethod + def edit_diff_package( + cls, + diff_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "diffId": {"type": "ID!", "value": diff_id}, + "name": {"type": "String", "value": name}, + "description": {"type": "String", "value": description}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="editDiffPackage", arguments=cleared_arguments) + + @classmethod + def edit_work_package( + cls, + work_package_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "workPackageId": {"type": "ID!", "value": work_package_id}, + "name": {"type": "String", "value": name}, + "description": {"type": "String", "value": description}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="editWorkPackage", arguments=cleared_arguments) + + @classmethod + def generate_enhanced_network_performance_diff( + cls, + diff_id: str, + work_package_id_1: str, + work_package_id_2: str, + *, + diff_name: Optional[str] = None, + scenario: Optional[str] = None, + feeder: Optional[str] = None, + year: Optional[int] = None, + season: Optional[str] = None, + time_of_day: Optional[str] = None + ) -> DiffResultFields: + arguments: dict[str, dict[str, Any]] = { + "diffId": {"type": "ID!", "value": diff_id}, + "diffName": {"type": "String", "value": diff_name}, + "workPackageId1": {"type": "ID!", "value": work_package_id_1}, + "workPackageId2": {"type": "ID!", "value": 
work_package_id_2}, + "scenario": {"type": "String", "value": scenario}, + "feeder": {"type": "String", "value": feeder}, + "year": {"type": "Int", "value": year}, + "season": {"type": "String", "value": season}, + "timeOfDay": {"type": "String", "value": time_of_day}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return DiffResultFields( + field_name="generateEnhancedNetworkPerformanceDiff", + arguments=cleared_arguments, + ) + + @classmethod + def generate_network_performance_diff( + cls, + diff_id: str, + work_package_id_1: str, + work_package_id_2: str, + *, + diff_name: Optional[str] = None, + scenario: Optional[str] = None, + feeder: Optional[str] = None, + year: Optional[int] = None + ) -> DiffResultFields: + arguments: dict[str, dict[str, Any]] = { + "diffId": {"type": "ID!", "value": diff_id}, + "diffName": {"type": "String", "value": diff_name}, + "workPackageId1": {"type": "ID!", "value": work_package_id_1}, + "workPackageId2": {"type": "ID!", "value": work_package_id_2}, + "scenario": {"type": "String", "value": scenario}, + "feeder": {"type": "String", "value": feeder}, + "year": {"type": "Int", "value": year}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return DiffResultFields( + field_name="generateNetworkPerformanceDiff", arguments=cleared_arguments + ) + + @classmethod + def process_input_database(cls, file_path: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "filePath": {"type": "String!", "value": file_path} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="processInputDatabase", arguments=cleared_arguments + ) + + @classmethod + def run_calibration( + cls, + calibration_name: str, + *, + calibration_time_local: Optional[Any] = None, + feeders: Optional[list[str]] = None, + generator_config: 
Optional[HcGeneratorConfigInput] = None + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "calibrationName": {"type": "String!", "value": calibration_name}, + "calibrationTimeLocal": { + "type": "LocalDateTime", + "value": calibration_time_local, + }, + "feeders": {"type": "[String!]", "value": feeders}, + "generatorConfig": { + "type": "HcGeneratorConfigInput", + "value": generator_config, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="runCalibration", arguments=cleared_arguments) + + @classmethod + def run_work_package( + cls, input: WorkPackageInput, work_package_name: str + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "WorkPackageInput!", "value": input}, + "workPackageName": {"type": "String!", "value": work_package_name}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="runWorkPackage", arguments=cleared_arguments) + + @classmethod + def lock_network_model_database(cls) -> GraphQLField: + return GraphQLField(field_name="lockNetworkModelDatabase") + + @classmethod + def switch_network_model_database(cls, date: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "date": {"type": "String!", "value": date} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="switchNetworkModelDatabase", arguments=cleared_arguments + ) + + @classmethod + def unlock_network_model_database(cls) -> GraphQLField: + return GraphQLField(field_name="unlockNetworkModelDatabase") + + @classmethod + def create_sincal_model(cls, input: SincalModelInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "SincalModelInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in 
arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="createSincalModel", arguments=cleared_arguments) + + @classmethod + def create_sincal_model_preset(cls, input: SincalModelInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "SincalModelInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="createSincalModelPreset", arguments=cleared_arguments + ) + + @classmethod + def delete_sincal_model(cls, model_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="deleteSincalModel", arguments=cleared_arguments) + + @classmethod + def delete_sincal_model_preset(cls, preset_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "presetId": {"type": "ID!", "value": preset_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="deleteSincalModelPreset", arguments=cleared_arguments + ) + + @classmethod + def update_sincal_model_config_file_path( + cls, file_path: str, file_type: SincalFileType + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "filePath": {"type": "String!", "value": file_path}, + "fileType": {"type": "SincalFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="updateSincalModelConfigFilePath", arguments=cleared_arguments + ) + + @classmethod + def update_sincal_model_preset( + cls, preset_id: str, generation_spec: SincalModelGenerationSpecInput + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "presetId": 
{"type": "ID!", "value": preset_id}, + "generationSpec": { + "type": "SincalModelGenerationSpecInput!", + "value": generation_spec, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="updateSincalModelPreset", arguments=cleared_arguments + ) + + @classmethod + def execute_ingestor( + cls, *, run_config: Optional[list[IngestorConfigInput]] = None + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "runConfig": {"type": "[IngestorConfigInput!]", "value": run_config} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="executeIngestor", arguments=cleared_arguments) + + @classmethod + def run_feeder_load_analysis(cls, input: FeederLoadAnalysisInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "FeederLoadAnalysisInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="runFeederLoadAnalysis", arguments=cleared_arguments + ) + + @classmethod + def create_open_dss_model(cls, input: OpenDssModelInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "OpenDssModelInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="createOpenDssModel", arguments=cleared_arguments + ) + + @classmethod + def delete_open_dss_model(cls, model_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="deleteOpenDssModel", arguments=cleared_arguments + ) + + @classmethod + def 
save_user_customer_list_column_config( + cls, columns: list[str] + ) -> UserCustomerListColumnConfigFields: + arguments: dict[str, dict[str, Any]] = { + "columns": {"type": "[String!]!", "value": columns} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UserCustomerListColumnConfigFields( + field_name="saveUserCustomerListColumnConfig", arguments=cleared_arguments + ) + + @classmethod + def clear_app_option(cls, name: str) -> RemoveAppOptionResultFields: + arguments: dict[str, dict[str, Any]] = { + "name": {"type": "String!", "value": name} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return RemoveAppOptionResultFields( + field_name="clearAppOption", arguments=cleared_arguments + ) + + @classmethod + def set_app_option(cls, app_options: AppOptionsInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "appOptions": {"type": "AppOptionsInput!", "value": app_options} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="setAppOption", arguments=cleared_arguments) + + @classmethod + def finalize_variant_processing( + cls, variant_upload_id: str, submitted_variants: list[str] + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "variantUploadId": {"type": "String!", "value": variant_upload_id}, + "submittedVariants": {"type": "[String!]!", "value": submitted_variants}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="finalizeVariantProcessing", arguments=cleared_arguments + ) + + @classmethod + def start_variant_processing( + cls, prefix: str, file_type: VariantFileType + ) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "prefix": {"type": "String!", "value": prefix}, + "fileType": {"type": 
"VariantFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="startVariantProcessing", arguments=cleared_arguments + ) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py new file mode 100644 index 0000000..b760ef3 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -0,0 +1,855 @@ +# Generated by ariadne-codegen + +from typing import Any, Optional + +from . import HostingCapacityFileType, WorkflowStatus, ContainerType, SincalFileType, VariantFileType +from .custom_fields import ( + AppOptionsFields, + CustomerDetailsResponseFields, + DurationCurveByTerminalFields, + FeederLoadAnalysisReportFields, + GqlTxTapRecordFields, + GqlUserFields, + GqlUserResponseFields, + HcCalibrationFields, + HcScenarioConfigsPageFields, + HcWorkPackageFields, + HcWorkPackagePageFields, + IngestionJobFields, + IngestionRunFields, + IngestorRunPageFields, + JobSourceFields, + MachineUserFields, + MetricFields, + NetworkModelsFields, + OpenDssModelPageFields, + OpportunitiesByYearFields, + OpportunityFields, + OpportunityLocationFields, + PowerFactoryModelFields, + PowerFactoryModelPageFields, + PowerFactoryModelTemplateFields, + PowerFactoryModelTemplatePageFields, + ProcessedDiffFields, + ProcessedDiffPageFields, + SincalGlobalInputsConfigFields, + SincalModelFields, + SincalModelPageFields, + SincalModelPresetFields, + SincalModelPresetPageFields, + StudyFields, + StudyPageFields, + StudyResultFields, + UploadUrlResponseFields, + UserCustomerListColumnConfigFields, + VariantWorkPackageFields, + WorkPackageTreeFields, +) +from .custom_typing_fields import GraphQLField +from .input_types import ( + GetOpenDssModelsFilterInput, + GetOpenDssModelsSortCriteriaInput, + GetPowerFactoryModelsFilterInput, + GetPowerFactoryModelsSortCriteriaInput, + 
GetPowerFactoryModelTemplatesFilterInput, + GetPowerFactoryModelTemplatesSortCriteriaInput, + GetSincalModelPresetsFilterInput, + GetSincalModelPresetsSortCriteriaInput, + GetSincalModelsFilterInput, + GetSincalModelsSortCriteriaInput, + GetStudiesFilterInput, + GetStudiesSortCriteriaInput, + HcScenarioConfigsFilterInput, + HcWorkPackagesFilterInput, + HcWorkPackagesSortCriteriaInput, + IngestorRunsFilterInput, + IngestorRunsSortCriteriaInput, + ProcessedDiffFilterInput, + ProcessedDiffSortCriteriaInput, + WorkPackageInput, +) + + +class Query: + @classmethod + def paged_studies( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetStudiesFilterInput] = None, + sort: Optional[GetStudiesSortCriteriaInput] = None + ) -> StudyPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetStudiesFilterInput", "value": filter_}, + "sort": {"type": "GetStudiesSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyPageFields(field_name="pagedStudies", arguments=cleared_arguments) + + @classmethod + def results_by_id(cls, ids: list[str]) -> StudyResultFields: + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyResultFields(field_name="resultsById", arguments=cleared_arguments) + + @classmethod + def studies(cls, *, filter_: Optional[GetStudiesFilterInput] = None) -> StudyFields: + arguments: dict[str, dict[str, Any]] = { + "filter": {"type": "GetStudiesFilterInput", "value": filter_} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyFields(field_name="studies", arguments=cleared_arguments) + + 
@classmethod + def studies_by_id(cls, ids: list[str]) -> StudyFields: + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyFields(field_name="studiesById", arguments=cleared_arguments) + + @classmethod + def styles_by_id(cls, ids: list[str]) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="stylesById", arguments=cleared_arguments) + + @classmethod + def current_user(cls) -> GqlUserResponseFields: + return GqlUserResponseFields(field_name="currentUser") + + @classmethod + def paged_power_factory_model_templates( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetPowerFactoryModelTemplatesFilterInput] = None, + sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput] = None + ) -> PowerFactoryModelTemplatePageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": { + "type": "GetPowerFactoryModelTemplatesFilterInput", + "value": filter_, + }, + "sort": { + "type": "GetPowerFactoryModelTemplatesSortCriteriaInput", + "value": sort, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplatePageFields( + field_name="pagedPowerFactoryModelTemplates", arguments=cleared_arguments + ) + + @classmethod + def paged_power_factory_models( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetPowerFactoryModelsFilterInput] = None, + sort: Optional[GetPowerFactoryModelsSortCriteriaInput] = None + ) -> PowerFactoryModelPageFields: + arguments: dict[str, dict[str, 
Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetPowerFactoryModelsFilterInput", "value": filter_}, + "sort": {"type": "GetPowerFactoryModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelPageFields( + field_name="pagedPowerFactoryModels", arguments=cleared_arguments + ) + + @classmethod + def power_factory_model_by_id(cls, model_id: str) -> PowerFactoryModelFields: + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelFields( + field_name="powerFactoryModelById", arguments=cleared_arguments + ) + + @classmethod + def power_factory_model_template_by_id( + cls, template_id: str + ) -> PowerFactoryModelTemplateFields: + arguments: dict[str, dict[str, Any]] = { + "templateId": {"type": "ID!", "value": template_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplateFields( + field_name="powerFactoryModelTemplateById", arguments=cleared_arguments + ) + + @classmethod + def power_factory_model_templates_by_ids( + cls, template_ids: list[str] + ) -> PowerFactoryModelTemplateFields: + arguments: dict[str, dict[str, Any]] = { + "templateIds": {"type": "[ID!]!", "value": template_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplateFields( + field_name="powerFactoryModelTemplatesByIds", arguments=cleared_arguments + ) + + @classmethod + def power_factory_models_by_ids( + cls, model_ids: list[str] + ) -> PowerFactoryModelFields: + arguments: dict[str, dict[str, Any]] = { + "modelIds": {"type": "[ID!]!", "value": 
model_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelFields( + field_name="powerFactoryModelsByIds", arguments=cleared_arguments + ) + + @classmethod + def get_active_work_packages(cls) -> GraphQLField: + return GraphQLField(field_name="getActiveWorkPackages") + + @classmethod + def get_all_work_packages_authors(cls) -> GqlUserFields: + return GqlUserFields(field_name="getAllWorkPackagesAuthors") + + @classmethod + def get_calibration_run(cls, id: str) -> HcCalibrationFields: + arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcCalibrationFields( + field_name="getCalibrationRun", arguments=cleared_arguments + ) + + @classmethod + def get_calibration_sets(cls) -> GraphQLField: + return GraphQLField(field_name="getCalibrationSets") + + @classmethod + def get_duration_curves( + cls, + work_package_id: str, + scenario: str, + feeder: str, + year: int, + conducting_equipment_mrid: str, + ) -> DurationCurveByTerminalFields: + arguments: dict[str, dict[str, Any]] = { + "workPackageId": {"type": "String!", "value": work_package_id}, + "scenario": {"type": "String!", "value": scenario}, + "feeder": {"type": "String!", "value": feeder}, + "year": {"type": "Int!", "value": year}, + "conductingEquipmentMrid": { + "type": "String!", + "value": conducting_equipment_mrid, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return DurationCurveByTerminalFields( + field_name="getDurationCurves", arguments=cleared_arguments + ) + + @classmethod + def get_opportunities( + cls, *, year: Optional[int] = None + ) -> OpportunitiesByYearFields: + arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} + cleared_arguments = { + key: value for key, value in 
arguments.items() if value["value"] is not None + } + return OpportunitiesByYearFields( + field_name="getOpportunities", arguments=cleared_arguments + ) + + @classmethod + def get_opportunities_for_equipment(cls, m_rid: str) -> OpportunityFields: + arguments: dict[str, dict[str, Any]] = { + "mRID": {"type": "String!", "value": m_rid} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityFields( + field_name="getOpportunitiesForEquipment", arguments=cleared_arguments + ) + + @classmethod + def get_opportunity(cls, id: str) -> OpportunityFields: + arguments: dict[str, dict[str, Any]] = {"id": {"type": "String!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityFields( + field_name="getOpportunity", arguments=cleared_arguments + ) + + @classmethod + def get_opportunity_locations( + cls, *, year: Optional[int] = None + ) -> OpportunityLocationFields: + arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityLocationFields( + field_name="getOpportunityLocations", arguments=cleared_arguments + ) + + @classmethod + def get_scenario_configurations( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[HcScenarioConfigsFilterInput] = None + ) -> HcScenarioConfigsPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "HcScenarioConfigsFilterInput", "value": filter_}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcScenarioConfigsPageFields( + field_name="getScenarioConfigurations", arguments=cleared_arguments + ) + + @classmethod + def 
get_transformer_tap_settings( + cls, + calibration_name: str, + *, + feeder: Optional[str] = None, + transformer_mrid: Optional[str] = None + ) -> GqlTxTapRecordFields: + arguments: dict[str, dict[str, Any]] = { + "calibrationName": {"type": "String!", "value": calibration_name}, + "feeder": {"type": "String", "value": feeder}, + "transformerMrid": {"type": "String", "value": transformer_mrid}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GqlTxTapRecordFields( + field_name="getTransformerTapSettings", arguments=cleared_arguments + ) + + @classmethod + def get_work_package_by_id( + cls, id: str, *, with_groupings: Optional[bool] = None + ) -> HcWorkPackageFields: + arguments: dict[str, dict[str, Any]] = { + "id": {"type": "ID!", "value": id}, + "withGroupings": {"type": "Boolean", "value": with_groupings}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcWorkPackageFields( + field_name="getWorkPackageById", arguments=cleared_arguments + ) + + @classmethod + def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "WorkPackageInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="getWorkPackageCostEstimation", arguments=cleared_arguments + ) + + @classmethod + def get_work_package_tree(cls, id: str) -> WorkPackageTreeFields: + arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return WorkPackageTreeFields( + field_name="getWorkPackageTree", arguments=cleared_arguments + ) + + @classmethod + def get_work_packages( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, 
+ filter_: Optional[HcWorkPackagesFilterInput] = None, + sort: Optional[HcWorkPackagesSortCriteriaInput] = None, + with_groupings: Optional[bool] = None + ) -> HcWorkPackagePageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "HcWorkPackagesFilterInput", "value": filter_}, + "sort": {"type": "HcWorkPackagesSortCriteriaInput", "value": sort}, + "withGroupings": {"type": "Boolean", "value": with_groupings}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcWorkPackagePageFields( + field_name="getWorkPackages", arguments=cleared_arguments + ) + + @classmethod + def hosting_capacity_file_upload_url( + cls, filename: str, file_type: HostingCapacityFileType + ) -> UploadUrlResponseFields: + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "HostingCapacityFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="hostingCapacityFileUploadUrl", arguments=cleared_arguments + ) + + @classmethod + def list_calibration_runs( + cls, + *, + name: Optional[str] = None, + calibration_time: Optional[Any] = None, + status: Optional[WorkflowStatus] = None + ) -> HcCalibrationFields: + arguments: dict[str, dict[str, Any]] = { + "name": {"type": "String", "value": name}, + "calibrationTime": {"type": "LocalDateTime", "value": calibration_time}, + "status": {"type": "WorkflowStatus", "value": status}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcCalibrationFields( + field_name="listCalibrationRuns", arguments=cleared_arguments + ) + + @classmethod + def get_processed_diff(cls, diff_id: str) -> ProcessedDiffFields: + arguments: 
dict[str, dict[str, Any]] = { + "diffId": {"type": "ID!", "value": diff_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return ProcessedDiffFields( + field_name="getProcessedDiff", arguments=cleared_arguments + ) + + @classmethod + def get_processed_diffs( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[ProcessedDiffFilterInput] = None, + sort: Optional[ProcessedDiffSortCriteriaInput] = None + ) -> ProcessedDiffPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "ProcessedDiffFilterInput", "value": filter_}, + "sort": {"type": "ProcessedDiffSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return ProcessedDiffPageFields( + field_name="getProcessedDiffs", arguments=cleared_arguments + ) + + @classmethod + def get_all_jobs(cls) -> IngestionJobFields: + return IngestionJobFields(field_name="getAllJobs") + + @classmethod + def get_distinct_metric_names(cls, job_id: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="getDistinctMetricNames", arguments=cleared_arguments + ) + + @classmethod + def get_metrics( + cls, job_id: str, container_type: ContainerType, container_id: str + ) -> MetricFields: + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id}, + "containerType": {"type": "ContainerType!", "value": container_type}, + "containerId": {"type": "String!", "value": container_id}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return 
MetricFields(field_name="getMetrics", arguments=cleared_arguments) + + @classmethod + def get_newest_job(cls) -> IngestionJobFields: + return IngestionJobFields(field_name="getNewestJob") + + @classmethod + def get_sources(cls, job_id: str) -> JobSourceFields: + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return JobSourceFields(field_name="getSources", arguments=cleared_arguments) + + @classmethod + def paged_sincal_model_presets( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetSincalModelPresetsFilterInput] = None, + sort: Optional[GetSincalModelPresetsSortCriteriaInput] = None + ) -> SincalModelPresetPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetSincalModelPresetsFilterInput", "value": filter_}, + "sort": {"type": "GetSincalModelPresetsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetPageFields( + field_name="pagedSincalModelPresets", arguments=cleared_arguments + ) + + @classmethod + def paged_sincal_models( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetSincalModelsFilterInput] = None, + sort: Optional[GetSincalModelsSortCriteriaInput] = None + ) -> SincalModelPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetSincalModelsFilterInput", "value": filter_}, + "sort": {"type": "GetSincalModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return 
SincalModelPageFields( + field_name="pagedSincalModels", arguments=cleared_arguments + ) + + @classmethod + def sincal_model_by_id(cls, model_id: str) -> SincalModelFields: + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelFields( + field_name="sincalModelById", arguments=cleared_arguments + ) + + @classmethod + def sincal_model_config_upload_url( + cls, filename: str, file_type: SincalFileType + ) -> UploadUrlResponseFields: + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "SincalFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="sincalModelConfigUploadUrl", arguments=cleared_arguments + ) + + @classmethod + def sincal_model_global_config(cls) -> SincalGlobalInputsConfigFields: + return SincalGlobalInputsConfigFields(field_name="sincalModelGlobalConfig") + + @classmethod + def sincal_model_preset_by_id(cls, preset_id: str) -> SincalModelPresetFields: + arguments: dict[str, dict[str, Any]] = { + "presetId": {"type": "ID!", "value": preset_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetFields( + field_name="sincalModelPresetById", arguments=cleared_arguments + ) + + @classmethod + def sincal_model_presets_by_ids( + cls, preset_ids: list[str] + ) -> SincalModelPresetFields: + arguments: dict[str, dict[str, Any]] = { + "presetIds": {"type": "[ID!]!", "value": preset_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetFields( + field_name="sincalModelPresetsByIds", arguments=cleared_arguments + ) + + 
@classmethod + def sincal_models_by_ids(cls, model_ids: list[str]) -> SincalModelFields: + arguments: dict[str, dict[str, Any]] = { + "modelIds": {"type": "[ID!]!", "value": model_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelFields( + field_name="sincalModelsByIds", arguments=cleared_arguments + ) + + @classmethod + def create_machine_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "roles": {"type": "[String!]!", "value": roles}, + "tokenName": {"type": "String!", "value": token_name}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="createMachineApiKey", arguments=cleared_arguments + ) + + @classmethod + def create_user_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: + arguments: dict[str, dict[str, Any]] = { + "roles": {"type": "[String!]!", "value": roles}, + "tokenName": {"type": "String!", "value": token_name}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="createUserApiKey", arguments=cleared_arguments) + + @classmethod + def get_machine_tokens(cls) -> MachineUserFields: + return MachineUserFields(field_name="getMachineTokens") + + @classmethod + def get_public_geo_view_config(cls) -> GraphQLField: + return GraphQLField(field_name="getPublicGeoViewConfig") + + @classmethod + def get_all_external_roles(cls) -> GraphQLField: + return GraphQLField(field_name="getAllExternalRoles") + + @classmethod + def get_network_models(cls) -> NetworkModelsFields: + return NetworkModelsFields(field_name="getNetworkModels") + + @classmethod + def get_feeder_load_analysis_report_status( + cls, report_id: str, full_spec: bool + ) -> FeederLoadAnalysisReportFields: + arguments: dict[str, dict[str, Any]] = { + 
"reportId": {"type": "ID!", "value": report_id}, + "fullSpec": {"type": "Boolean!", "value": full_spec}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return FeederLoadAnalysisReportFields( + field_name="getFeederLoadAnalysisReportStatus", arguments=cleared_arguments + ) + + @classmethod + def get_ingestor_run(cls, id: int) -> IngestionRunFields: + arguments: dict[str, dict[str, Any]] = {"id": {"type": "Int!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestionRunFields( + field_name="getIngestorRun", arguments=cleared_arguments + ) + + @classmethod + def list_ingestor_runs( + cls, + *, + filter_: Optional[IngestorRunsFilterInput] = None, + sort: Optional[IngestorRunsSortCriteriaInput] = None + ) -> IngestionRunFields: + arguments: dict[str, dict[str, Any]] = { + "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, + "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestionRunFields( + field_name="listIngestorRuns", arguments=cleared_arguments + ) + + @classmethod + def list_ingestor_runs_paged( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[IngestorRunsFilterInput] = None, + sort: Optional[IngestorRunsSortCriteriaInput] = None + ) -> IngestorRunPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, + "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestorRunPageFields( + field_name="listIngestorRunsPaged", 
arguments=cleared_arguments + ) + + @classmethod + def paged_open_dss_models( + cls, + *, + limit: Optional[int] = None, + offset: Optional[Any] = None, + filter_: Optional[GetOpenDssModelsFilterInput] = None, + sort: Optional[GetOpenDssModelsSortCriteriaInput] = None + ) -> OpenDssModelPageFields: + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetOpenDssModelsFilterInput", "value": filter_}, + "sort": {"type": "GetOpenDssModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpenDssModelPageFields( + field_name="pagedOpenDssModels", arguments=cleared_arguments + ) + + @classmethod + def get_user_permitted_customer_list_column_config( + cls, + ) -> UserCustomerListColumnConfigFields: + return UserCustomerListColumnConfigFields( + field_name="getUserPermittedCustomerListColumnConfig" + ) + + @classmethod + def get_user_saved_customer_list_column_config( + cls, + ) -> UserCustomerListColumnConfigFields: + return UserCustomerListColumnConfigFields( + field_name="getUserSavedCustomerListColumnConfig" + ) + + @classmethod + def get_customer_list(cls, m_ri_ds: list[str]) -> CustomerDetailsResponseFields: + arguments: dict[str, dict[str, Any]] = { + "mRIDs": {"type": "[String!]!", "value": m_ri_ds} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return CustomerDetailsResponseFields( + field_name="getCustomerList", arguments=cleared_arguments + ) + + @classmethod + def get_customer_list_by_nmis( + cls, nmis: list[str] + ) -> CustomerDetailsResponseFields: + arguments: dict[str, dict[str, Any]] = { + "nmis": {"type": "[String!]!", "value": nmis} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return CustomerDetailsResponseFields( + 
field_name="getCustomerListByNmis", arguments=cleared_arguments + ) + + @classmethod + def get_app_options(cls) -> AppOptionsFields: + return AppOptionsFields(field_name="getAppOptions") + + @classmethod + def get_presigned_upload_url_for_variant( + cls, filename: str, file_type: VariantFileType + ) -> UploadUrlResponseFields: + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "VariantFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="getPresignedUploadUrlForVariant", arguments=cleared_arguments + ) + + @classmethod + def get_variant_upload_info(cls, job_id: str) -> VariantWorkPackageFields: + arguments: dict[str, dict[str, Any]] = { + "jobID": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return VariantWorkPackageFields( + field_name="getVariantUploadInfo", arguments=cleared_arguments + ) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py new file mode 100644 index 0000000..bdee65d --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py @@ -0,0 +1,429 @@ +# Generated by ariadne-codegen + +from .base_operation import GraphQLField + + +class AppOptionsGraphQLField(GraphQLField): + def alias(self, alias: str) -> "AppOptionsGraphQLField": + self._alias = alias + return self + + +class ColumnGraphQLField(GraphQLField): + def alias(self, alias: str) -> "ColumnGraphQLField": + self._alias = alias + return self + + +class CoordinateGraphQLField(GraphQLField): + def alias(self, alias: str) -> "CoordinateGraphQLField": + self._alias = alias + return self + + +class CustomerDetailsGraphQLField(GraphQLField): + def alias(self, alias: str) 
-> "CustomerDetailsGraphQLField": + self._alias = alias + return self + + +class CustomerDetailsResponseGraphQLField(GraphQLField): + def alias(self, alias: str) -> "CustomerDetailsResponseGraphQLField": + self._alias = alias + return self + + +class CustomerListColumnConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "CustomerListColumnConfigGraphQLField": + self._alias = alias + return self + + +class DiffResultGraphQLField(GraphQLField): + def alias(self, alias: str) -> "DiffResultGraphQLField": + self._alias = alias + return self + + +class DurationCurveGraphQLField(GraphQLField): + def alias(self, alias: str) -> "DurationCurveGraphQLField": + self._alias = alias + return self + + +class DurationCurveByTerminalGraphQLField(GraphQLField): + def alias(self, alias: str) -> "DurationCurveByTerminalGraphQLField": + self._alias = alias + return self + + +class DurationCurvePointGraphQLField(GraphQLField): + def alias(self, alias: str) -> "DurationCurvePointGraphQLField": + self._alias = alias + return self + + +class EquipmentGraphQLField(GraphQLField): + def alias(self, alias: str) -> "EquipmentGraphQLField": + self._alias = alias + return self + + +class FeederLoadAnalysisReportGraphQLField(GraphQLField): + def alias(self, alias: str) -> "FeederLoadAnalysisReportGraphQLField": + self._alias = alias + return self + + +class FeederLoadAnalysisSpecGraphQLField(GraphQLField): + def alias(self, alias: str) -> "FeederLoadAnalysisSpecGraphQLField": + self._alias = alias + return self + + +class GeoJsonFeatureGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GeoJsonFeatureGraphQLField": + self._alias = alias + return self + + +class GeoJsonGeometryGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GeoJsonGeometryGraphQLField": + self._alias = alias + return self + + +class GeoJsonOverlayGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GeoJsonOverlayGraphQLField": + self._alias = alias + return self + + +class 
GeoJsonPropertiesGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GeoJsonPropertiesGraphQLField": + self._alias = alias + return self + + +class GqlDistributionTransformerConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlDistributionTransformerConfigGraphQLField": + self._alias = alias + return self + + +class GqlLoadConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlLoadConfigGraphQLField": + self._alias = alias + return self + + +class GqlScenarioConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlScenarioConfigGraphQLField": + self._alias = alias + return self + + +class GqlTxTapRecordGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlTxTapRecordGraphQLField": + self._alias = alias + return self + + +class GqlUserGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlUserGraphQLField": + self._alias = alias + return self + + +class GqlUserResponseGraphQLField(GraphQLField): + def alias(self, alias: str) -> "GqlUserResponseGraphQLField": + self._alias = alias + return self + + +class HcCalibrationGraphQLField(GraphQLField): + def alias(self, alias: str) -> "HcCalibrationGraphQLField": + self._alias = alias + return self + + +class HcModelGraphQLField(GraphQLField): + def alias(self, alias: str) -> "HcModelGraphQLField": + self._alias = alias + return self + + +class HcScenarioConfigsPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "HcScenarioConfigsPageGraphQLField": + self._alias = alias + return self + + +class HcWorkPackageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "HcWorkPackageGraphQLField": + self._alias = alias + return self + + +class HcWorkPackagePageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "HcWorkPackagePageGraphQLField": + self._alias = alias + return self + + +class IngestionJobGraphQLField(GraphQLField): + def alias(self, alias: str) -> "IngestionJobGraphQLField": + self._alias = alias + return self + 
+ +class IngestionRunGraphQLField(GraphQLField): + def alias(self, alias: str) -> "IngestionRunGraphQLField": + self._alias = alias + return self + + +class IngestorRunPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "IngestorRunPageGraphQLField": + self._alias = alias + return self + + +class JobSourceGraphQLField(GraphQLField): + def alias(self, alias: str) -> "JobSourceGraphQLField": + self._alias = alias + return self + + +class MachineUserGraphQLField(GraphQLField): + def alias(self, alias: str) -> "MachineUserGraphQLField": + self._alias = alias + return self + + +class MetricGraphQLField(GraphQLField): + def alias(self, alias: str) -> "MetricGraphQLField": + self._alias = alias + return self + + +class NetworkModelGraphQLField(GraphQLField): + def alias(self, alias: str) -> "NetworkModelGraphQLField": + self._alias = alias + return self + + +class NetworkModelsGraphQLField(GraphQLField): + def alias(self, alias: str) -> "NetworkModelsGraphQLField": + self._alias = alias + return self + + +class OpenDssModelGraphQLField(GraphQLField): + def alias(self, alias: str) -> "OpenDssModelGraphQLField": + self._alias = alias + return self + + +class OpenDssModelPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "OpenDssModelPageGraphQLField": + self._alias = alias + return self + + +class OpportunitiesByYearGraphQLField(GraphQLField): + def alias(self, alias: str) -> "OpportunitiesByYearGraphQLField": + self._alias = alias + return self + + +class OpportunityGraphQLField(GraphQLField): + def alias(self, alias: str) -> "OpportunityGraphQLField": + self._alias = alias + return self + + +class OpportunityLocationGraphQLField(GraphQLField): + def alias(self, alias: str) -> "OpportunityLocationGraphQLField": + self._alias = alias + return self + + +class PowerFactoryModelGraphQLField(GraphQLField): + def alias(self, alias: str) -> "PowerFactoryModelGraphQLField": + self._alias = alias + return self + + +class 
PowerFactoryModelGenerationSpecGraphQLField(GraphQLField): + def alias(self, alias: str) -> "PowerFactoryModelGenerationSpecGraphQLField": + self._alias = alias + return self + + +class PowerFactoryModelPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "PowerFactoryModelPageGraphQLField": + self._alias = alias + return self + + +class PowerFactoryModelTemplateGraphQLField(GraphQLField): + def alias(self, alias: str) -> "PowerFactoryModelTemplateGraphQLField": + self._alias = alias + return self + + +class PowerFactoryModelTemplatePageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "PowerFactoryModelTemplatePageGraphQLField": + self._alias = alias + return self + + +class ProcessedDiffGraphQLField(GraphQLField): + def alias(self, alias: str) -> "ProcessedDiffGraphQLField": + self._alias = alias + return self + + +class ProcessedDiffPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "ProcessedDiffPageGraphQLField": + self._alias = alias + return self + + +class RemoveAppOptionResultGraphQLField(GraphQLField): + def alias(self, alias: str) -> "RemoveAppOptionResultGraphQLField": + self._alias = alias + return self + + +class ResultSectionGraphQLField(GraphQLField): + def alias(self, alias: str) -> "ResultSectionGraphQLField": + self._alias = alias + return self + + +class ScenarioConfigurationGraphQLField(GraphQLField): + def alias(self, alias: str) -> "ScenarioConfigurationGraphQLField": + self._alias = alias + return self + + +class SincalConfigFileGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalConfigFileGraphQLField": + self._alias = alias + return self + + +class SincalGlobalInputsConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalGlobalInputsConfigGraphQLField": + self._alias = alias + return self + + +class SincalModelGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalModelGraphQLField": + self._alias = alias + return self + + +class 
SincalModelGenerationSpecGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalModelGenerationSpecGraphQLField": + self._alias = alias + return self + + +class SincalModelPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalModelPageGraphQLField": + self._alias = alias + return self + + +class SincalModelPresetGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalModelPresetGraphQLField": + self._alias = alias + return self + + +class SincalModelPresetPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "SincalModelPresetPageGraphQLField": + self._alias = alias + return self + + +class StateOverlayGraphQLField(GraphQLField): + def alias(self, alias: str) -> "StateOverlayGraphQLField": + self._alias = alias + return self + + +class StudyGraphQLField(GraphQLField): + def alias(self, alias: str) -> "StudyGraphQLField": + self._alias = alias + return self + + +class StudyPageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "StudyPageGraphQLField": + self._alias = alias + return self + + +class StudyResultGraphQLField(GraphQLField): + def alias(self, alias: str) -> "StudyResultGraphQLField": + self._alias = alias + return self + + +class TableSectionGraphQLField(GraphQLField): + def alias(self, alias: str) -> "TableSectionGraphQLField": + self._alias = alias + return self + + +class UploadUrlResponseGraphQLField(GraphQLField): + def alias(self, alias: str) -> "UploadUrlResponseGraphQLField": + self._alias = alias + return self + + +class UserCustomerListColumnConfigGraphQLField(GraphQLField): + def alias(self, alias: str) -> "UserCustomerListColumnConfigGraphQLField": + self._alias = alias + return self + + +class VariantGraphQLField(GraphQLField): + def alias(self, alias: str) -> "VariantGraphQLField": + self._alias = alias + return self + + +class VariantWorkPackageGraphQLField(GraphQLField): + def alias(self, alias: str) -> "VariantWorkPackageGraphQLField": + self._alias = alias + return self + + 
+class WorkPackageModelGroupingsGraphQLField(GraphQLField): + def alias(self, alias: str) -> "WorkPackageModelGroupingsGraphQLField": + self._alias = alias + return self + + +class WorkPackageModelTotalsGraphQLField(GraphQLField): + def alias(self, alias: str) -> "WorkPackageModelTotalsGraphQLField": + self._alias = alias + return self + + +class WorkPackageProgressDetailsGraphQLField(GraphQLField): + def alias(self, alias: str) -> "WorkPackageProgressDetailsGraphQLField": + self._alias = alias + return self + + +class WorkPackageTreeGraphQLField(GraphQLField): + def alias(self, alias: str) -> "WorkPackageTreeGraphQLField": + self._alias = alias + return self diff --git a/src/zepben/eas/lib/generated_graphql_client/enums.py b/src/zepben/eas/lib/generated_graphql_client/enums.py new file mode 100644 index 0000000..2d28db4 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/enums.py @@ -0,0 +1,225 @@ +# Generated by ariadne-codegen +# Source: http://127.0.0.1:7654/api/graphql + +from enum import Enum + + +class CandidateGenerationType(str, Enum): + CRITERIA = "CRITERIA" + TAP_OPTIMIZATION = "TAP_OPTIMIZATION" + + +class ColumnGroup(str, Enum): + PII = "PII" + NON_PII = "NON_PII" + + +class ColumnName(str, Enum): + ZONE_SUBSTATION = "ZONE_SUBSTATION" + FEEDER = "FEEDER" + TRANSFORMER_ID = "TRANSFORMER_ID" + TRANSFORMER_DESCRIPTION = "TRANSFORMER_DESCRIPTION" + METER_NUMBER = "METER_NUMBER" + SERVICE_ADDRESS = "SERVICE_ADDRESS" + SUPPLY_POINT_ID = "SUPPLY_POINT_ID" + NMI = "NMI" + LV_FEEDER = "LV_FEEDER" + MOBILE_NUMBER = "MOBILE_NUMBER" + PHONE_NUMBER = "PHONE_NUMBER" + POSTAL_ADDRESS = "POSTAL_ADDRESS" + TNI = "TNI" + DLF = "DLF" + FIRST_NAME = "FIRST_NAME" + LAST_NAME = "LAST_NAME" + CUSTOMER_TYPE = "CUSTOMER_TYPE" + IS_ENERGY_FEEDBACK = "IS_ENERGY_FEEDBACK" + IS_EMBEDDED_NETWORK = "IS_EMBEDDED_NETWORK" + DISTRIBUTOR = "DISTRIBUTOR" + SENSITIVITY_CATEGORY = "SENSITIVITY_CATEGORY" + MOVE_IN_DATE = "MOVE_IN_DATE" + NMI_CLASS = "NMI_CLASS" + 
SERVICE_PROVISION_STATUS = "SERVICE_PROVISION_STATUS" + TARIFF = "TARIFF" + + +class ContainerType(str, Enum): + TOTAL = "TOTAL" + GeographicalRegion = "GeographicalRegion" + SubGeographicalRegion = "SubGeographicalRegion" + SubstationTotal = "SubstationTotal" + Substation = "Substation" + FeederTotal = "FeederTotal" + Feeder = "Feeder" + LvFeeder = "LvFeeder" + + +class DaysRequired(str, Enum): + WEEKDAYS = "WEEKDAYS" + WEEKENDS = "WEEKENDS" + ANYDAYS = "ANYDAYS" + WEEKDAYSEXHOLIDAYS = "WEEKDAYSEXHOLIDAYS" + + +class DiffType(str, Enum): + NETWORK_PERFORMANCE_METRICS = "NETWORK_PERFORMANCE_METRICS" + NETWORK_PERFORMANCE_METRICS_ENHANCED = "NETWORK_PERFORMANCE_METRICS_ENHANCED" + + +class HcFeederScenarioAllocationStrategy(str, Enum): + RANDOM = "RANDOM" + ADDITIVE = "ADDITIVE" + + +class HcLoadPlacement(str, Enum): + PER_ENERGY_CONSUMER = "PER_ENERGY_CONSUMER" + PER_USAGE_POINT = "PER_USAGE_POINT" + + +class HcSolveMode(str, Enum): + YEARLY = "YEARLY" + DAILY = "DAILY" + + +class HcSwitchClass(str, Enum): + BREAKER = "BREAKER" + DISCONNECTOR = "DISCONNECTOR" + FUSE = "FUSE" + JUMPER = "JUMPER" + LOAD_BREAK_SWITCH = "LOAD_BREAK_SWITCH" + RECLOSER = "RECLOSER" + + +class HcWriterType(str, Enum): + POSTGRES = "POSTGRES" + PARQUET = "PARQUET" + + +class HostingCapacityFileType(str, Enum): + INPUT_DATABASE = "INPUT_DATABASE" + + +class IngestorRunState(str, Enum): + INITIALIZED = "INITIALIZED" + QUEUED = "QUEUED" + STARTED = "STARTED" + RUNNING = "RUNNING" + SUCCESS = "SUCCESS" + FAILURE = "FAILURE" + FAILED_TO_START = "FAILED_TO_START" + + +class IngestorRuntimeKind(str, Enum): + AZURE_CONTAINER_APP_JOB = "AZURE_CONTAINER_APP_JOB" + DOCKER = "DOCKER" + ECS = "ECS" + KUBERNETES = "KUBERNETES" + TEMPORAL_KUBERNETES = "TEMPORAL_KUBERNETES" + + +class InterventionClass(str, Enum): + TARIFF_REFORM = "TARIFF_REFORM" + CONTROLLED_LOAD_HOT_WATER = "CONTROLLED_LOAD_HOT_WATER" + COMMUNITY_BESS = "COMMUNITY_BESS" + DISTRIBUTION_TX_OLTC = "DISTRIBUTION_TX_OLTC" + LV_STATCOMS = 
"LV_STATCOMS" + DVMS = "DVMS" + PHASE_REBALANCING = "PHASE_REBALANCING" + DISTRIBUTION_TAP_OPTIMIZATION = "DISTRIBUTION_TAP_OPTIMIZATION" + UNKNOWN = "UNKNOWN" + + +class MeasurementZoneType(str, Enum): + UNKNOWN = "UNKNOWN" + TRANSFORMER = "TRANSFORMER" + BREAKER = "BREAKER" + DISCONNECTOR = "DISCONNECTOR" + FUSE = "FUSE" + JUMPER = "JUMPER" + LOAD_BREAK_SWITCH = "LOAD_BREAK_SWITCH" + RECLOSER = "RECLOSER" + ENERGY_CONSUMER = "ENERGY_CONSUMER" + FEEDER_HEAD = "FEEDER_HEAD" + CALIBRATION = "CALIBRATION" + + +class OpportunitiesNeed(str, Enum): + EXPORTDECREASE = "EXPORTDECREASE" + EXPORTINCREASE = "EXPORTINCREASE" + CONSUMPTIONDECREASE = "CONSUMPTIONDECREASE" + CONSUMPTIONINCREASE = "CONSUMPTIONINCREASE" + + +class OpportunitiesType(str, Enum): + CAPACITY = "CAPACITY" + VOLTAGESUPPORT = "VOLTAGESUPPORT" + + +class SectionType(str, Enum): + TABLE = "TABLE" + + +class SerializationType(str, Enum): + TABLE_JSON = "TABLE_JSON" + TABLE_CSV = "TABLE_CSV" + COMPRESSED_BYTES = "COMPRESSED_BYTES" + + +class SincalFileType(str, Enum): + TEMPLATE = "TEMPLATE" + IN_FEEDER_MAPPING_DATABASE = "IN_FEEDER_MAPPING_DATABASE" + LOCAL_STANDARD_DATABASE = "LOCAL_STANDARD_DATABASE" + PROTECTION_STANDARD_DATABASE = "PROTECTION_STANDARD_DATABASE" + FRONTEND_CONFIG = "FRONTEND_CONFIG" + BACKEND_CONFIG = "BACKEND_CONFIG" + + +class SortOrder(str, Enum): + ASC = "ASC" + DESC = "DESC" + ASC_NULLS_FIRST = "ASC_NULLS_FIRST" + DESC_NULLS_FIRST = "DESC_NULLS_FIRST" + ASC_NULLS_LAST = "ASC_NULLS_LAST" + DESC_NULLS_LAST = "DESC_NULLS_LAST" + + +class VariantFileType(str, Enum): + POWER_FACTORY_DGS = "POWER_FACTORY_DGS" + + +class VariantStatus(str, Enum): + Pending = "Pending" + Converting = "Converting" + AwaitingConflictDetection = "AwaitingConflictDetection" + ConflictDetection = "ConflictDetection" + ReadyForSubmission = "ReadyForSubmission" + Done = "Done" + Deleted = "Deleted" + Errored = "Errored" + + +class VariantWorkflowStatus(str, Enum): + Scheduled = "Scheduled" + Extracting = 
"Extracting" + Converting = "Converting" + ConflictDetection = "ConflictDetection" + WaitingForSubmission = "WaitingForSubmission" + Submitting = "Submitting" + Done = "Done" + Errored = "Errored" + + +class WorkPackageState(str, Enum): + SETUP = "SETUP" + PRRP = "PRRP" + RUNNING = "RUNNING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + CANCELLED = "CANCELLED" + TIMEDOUT = "TIMEDOUT" + + +class WorkflowStatus(str, Enum): + STARTED = "STARTED" + RUNNING = "RUNNING" + COMPLETED = "COMPLETED" + FAILED = "FAILED" diff --git a/src/zepben/eas/lib/generated_graphql_client/exceptions.py b/src/zepben/eas/lib/generated_graphql_client/exceptions.py new file mode 100644 index 0000000..6dcc565 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/exceptions.py @@ -0,0 +1,85 @@ +# Generated by ariadne-codegen + +from typing import Any, Optional, Union + +import httpx + + +class GraphQLClientError(Exception): + """Base exception.""" + + +class GraphQLClientHttpError(GraphQLClientError): + def __init__(self, status_code: int, response: httpx.Response) -> None: + self.status_code = status_code + self.response = response + + def __str__(self) -> str: + return f"HTTP status code: {self.status_code}" + + +class GraphQLClientInvalidResponseError(GraphQLClientError): + def __init__(self, response: httpx.Response) -> None: + self.response = response + + def __str__(self) -> str: + return "Invalid response format." 
+ + +class GraphQLClientGraphQLError(GraphQLClientError): + def __init__( + self, + message: str, + locations: Optional[list[dict[str, int]]] = None, + path: Optional[list[str]] = None, + extensions: Optional[dict[str, object]] = None, + original: Optional[dict[str, object]] = None, + ): + self.message = message + self.locations = locations + self.path = path + self.extensions = extensions + self.original = original + + def __str__(self) -> str: + return self.message + + @classmethod + def from_dict(cls, error: dict[str, Any]) -> "GraphQLClientGraphQLError": + return cls( + message=error["message"], + locations=error.get("locations"), + path=error.get("path"), + extensions=error.get("extensions"), + original=error, + ) + + +class GraphQLClientGraphQLMultiError(GraphQLClientError): + def __init__( + self, + errors: list[GraphQLClientGraphQLError], + data: Optional[dict[str, Any]] = None, + ): + self.errors = errors + self.data = data + + def __str__(self) -> str: + return "; ".join(str(e) for e in self.errors) + + @classmethod + def from_errors_dicts( + cls, errors_dicts: list[dict[str, Any]], data: Optional[dict[str, Any]] = None + ) -> "GraphQLClientGraphQLMultiError": + return cls( + errors=[GraphQLClientGraphQLError.from_dict(e) for e in errors_dicts], + data=data, + ) + + +class GraphQLClientInvalidMessageFormat(GraphQLClientError): # noqa: N818 + def __init__(self, message: Union[str, bytes]) -> None: + self.message = message + + def __str__(self) -> str: + return "Invalid message format." 
diff --git a/src/zepben/eas/lib/generated_graphql_client/input_types.py b/src/zepben/eas/lib/generated_graphql_client/input_types.py new file mode 100644 index 0000000..3037e00 --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/input_types.py @@ -0,0 +1,775 @@ +# Generated by ariadne-codegen +# Source: http://127.0.0.1:7654/api/graphql + +from typing import Any, Optional + +from pydantic import Field + +from .base_model import BaseModel +from .enums import ( + CandidateGenerationType, + HcFeederScenarioAllocationStrategy, + HcLoadPlacement, + HcSolveMode, + HcSwitchClass, + HcWriterType, + IngestorRunState, + IngestorRuntimeKind, + InterventionClass, + SectionType, + SortOrder, +) + + +class AppOptionsInput(BaseModel): + asset_name_format: Optional[str] = Field(alias="assetNameFormat", default=None) + pole_string_format: Optional[str] = Field(alias="poleStringFormat", default=None) + + +class CandidateGenerationConfigInput(BaseModel): + average_voltage_spread_threshold: Optional[int] = Field( + alias="averageVoltageSpreadThreshold", default=None + ) + intervention_criteria_name: Optional[str] = Field( + alias="interventionCriteriaName", default=None + ) + tap_weighting_factor_lower_threshold: Optional[float] = Field( + alias="tapWeightingFactorLowerThreshold", default=None + ) + tap_weighting_factor_upper_threshold: Optional[float] = Field( + alias="tapWeightingFactorUpperThreshold", default=None + ) + type_: CandidateGenerationType = Field(alias="type") + voltage_over_limit_hours_threshold: Optional[int] = Field( + alias="voltageOverLimitHoursThreshold", default=None + ) + voltage_under_limit_hours_threshold: Optional[int] = Field( + alias="voltageUnderLimitHoursThreshold", default=None + ) + + +class DvmsConfigInput(BaseModel): + lower_limit: float = Field(alias="lowerLimit") + lower_percentile: float = Field(alias="lowerPercentile") + max_iterations: int = Field(alias="maxIterations") + regulator_config: "DvmsRegulatorConfigInput" = 
Field(alias="regulatorConfig") + upper_limit: float = Field(alias="upperLimit") + upper_percentile: float = Field(alias="upperPercentile") + + +class DvmsRegulatorConfigInput(BaseModel): + allow_push_to_limit: bool = Field(alias="allowPushToLimit") + max_tap_change_per_step: int = Field(alias="maxTapChangePerStep") + pu_deadband_percent: float = Field(alias="puDeadbandPercent") + pu_target: float = Field(alias="puTarget") + + +class FeederConfigInput(BaseModel): + feeder: str + fixed_time: Optional["FixedTimeInput"] = Field(alias="fixedTime", default=None) + scenarios: list[str] + time_period: Optional["TimePeriodInput"] = Field(alias="timePeriod", default=None) + years: list[int] + + +class FeederConfigsInput(BaseModel): + configs: list["FeederConfigInput"] + + +class FeederLoadAnalysisInput(BaseModel): + aggregate_at_feeder_level: bool = Field(alias="aggregateAtFeederLevel") + end_date: str = Field(alias="endDate") + feeders: Optional[list[str]] = None + fetch_lv_network: bool = Field(alias="fetchLvNetwork") + fla_forecast_config: Optional["FlaForecastConfigInput"] = Field( + alias="flaForecastConfig", default=None + ) + geographical_regions: Optional[list[str]] = Field( + alias="geographicalRegions", default=None + ) + output: Optional[str] = None + process_coincident_loads: bool = Field(alias="processCoincidentLoads") + process_feeder_loads: bool = Field(alias="processFeederLoads") + produce_conductor_report: bool = Field(alias="produceConductorReport") + start_date: str = Field(alias="startDate") + sub_geographical_regions: Optional[list[str]] = Field( + alias="subGeographicalRegions", default=None + ) + substations: Optional[list[str]] = None + + +class FixedTimeInput(BaseModel): + load_time: Any = Field(alias="loadTime") + overrides: Optional[list["FixedTimeLoadOverrideInput"]] = None + + +class FixedTimeLoadOverrideInput(BaseModel): + gen_var_override: Optional[list[float]] = Field( + alias="genVarOverride", default=None + ) + gen_watts_override: 
Optional[list[float]] = Field( + alias="genWattsOverride", default=None + ) + load_id: str = Field(alias="loadId") + load_var_override: Optional[list[float]] = Field( + alias="loadVarOverride", default=None + ) + load_watts_override: Optional[list[float]] = Field( + alias="loadWattsOverride", default=None + ) + + +class FlaForecastConfigInput(BaseModel): + bess_upgrade_threshold: Optional[int] = Field( + alias="bessUpgradeThreshold", default=None + ) + pv_upgrade_threshold: Optional[int] = Field( + alias="pvUpgradeThreshold", default=None + ) + scenario_id: str = Field(alias="scenarioID") + seed: Optional[int] = None + year: int + + +class ForecastConfigInput(BaseModel): + feeders: list[str] + fixed_time: Optional["FixedTimeInput"] = Field(alias="fixedTime", default=None) + scenarios: list[str] + time_period: Optional["TimePeriodInput"] = Field(alias="timePeriod", default=None) + years: list[int] + + +class GeoJsonOverlayInput(BaseModel): + data: Any + source_properties: Optional[Any] = Field(alias="sourceProperties", default=None) + styles: list[str] + + +class GetOpenDssModelsFilterInput(BaseModel): + is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + state: Optional[list[str]] = None + + +class GetOpenDssModelsSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + is_public: Optional[SortOrder] = Field(alias="isPublic", default=None) + name: Optional[SortOrder] = None + state: Optional[SortOrder] = None + + +class GetPowerFactoryModelTemplatesFilterInput(BaseModel): + is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + + +class GetPowerFactoryModelTemplatesSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + is_public: Optional[SortOrder] = Field(alias="isPublic", default=None) + name: Optional[SortOrder] = None + + +class GetPowerFactoryModelsFilterInput(BaseModel): + 
is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + state: Optional[list[str]] = None + + +class GetPowerFactoryModelsSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + is_public: Optional[SortOrder] = Field(alias="isPublic", default=None) + name: Optional[SortOrder] = None + state: Optional[SortOrder] = None + + +class GetSincalModelPresetsFilterInput(BaseModel): + is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + + +class GetSincalModelPresetsSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + is_public: Optional[SortOrder] = Field(alias="isPublic", default=None) + name: Optional[SortOrder] = None + + +class GetSincalModelsFilterInput(BaseModel): + is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + state: Optional[list[str]] = None + + +class GetSincalModelsSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + is_public: Optional[SortOrder] = Field(alias="isPublic", default=None) + name: Optional[SortOrder] = None + state: Optional[SortOrder] = None + + +class GetStudiesFilterInput(BaseModel): + created_after: Optional[Any] = Field(alias="createdAfter", default=None) + created_before: Optional[Any] = Field(alias="createdBefore", default=None) + created_by: Optional[list[str]] = Field(alias="createdBy", default=None) + id: Optional[str] = None + name: Optional[str] = None + tags: Optional[list[str]] = None + + +class GetStudiesSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + created_by: Optional[SortOrder] = Field(alias="createdBy", default=None) + description: Optional[SortOrder] = None + name: Optional[SortOrder] = None + + +class GqlDistributionTransformerConfigInput(BaseModel): + r_ground: float = 
Field(alias="rGround") + x_ground: float = Field(alias="xGround") + + +class GqlLoadConfigInput(BaseModel): + spread_max_demand: bool = Field(alias="spreadMaxDemand") + + +class GqlScenarioConfigInput(BaseModel): + bess_upgrade_threshold: int = Field(alias="bessUpgradeThreshold") + pv_upgrade_threshold: int = Field(alias="pvUpgradeThreshold") + scenario_id: str = Field(alias="scenarioID") + years: list[int] + + +class GqlSincalModelForecastSpecInput(BaseModel): + scenario_id: str = Field(alias="scenarioId") + year: int + + +class HcEnhancedMetricsConfigInput(BaseModel): + calculate_co_2: Optional[bool] = Field(alias="calculateCO2", default=None) + calculate_emerg_for_gen_thermal: Optional[bool] = Field( + alias="calculateEmergForGenThermal", default=None + ) + calculate_emerg_for_load_thermal: Optional[bool] = Field( + alias="calculateEmergForLoadThermal", default=None + ) + calculate_normal_for_gen_thermal: Optional[bool] = Field( + alias="calculateNormalForGenThermal", default=None + ) + calculate_normal_for_load_thermal: Optional[bool] = Field( + alias="calculateNormalForLoadThermal", default=None + ) + populate_constraints: Optional[bool] = Field( + alias="populateConstraints", default=None + ) + populate_duration_curves: Optional[bool] = Field( + alias="populateDurationCurves", default=None + ) + populate_enhanced_metrics: Optional[bool] = Field( + alias="populateEnhancedMetrics", default=None + ) + populate_enhanced_metrics_profile: Optional[bool] = Field( + alias="populateEnhancedMetricsProfile", default=None + ) + populate_weekly_reports: Optional[bool] = Field( + alias="populateWeeklyReports", default=None + ) + + +class HcExecutorConfigInput(BaseModel): + value: Optional[str] = None + + +class HcGeneratorConfigInput(BaseModel): + model: Optional["HcModelConfigInput"] = None + node_level_results: Optional["HcNodeLevelResultsConfigInput"] = Field( + alias="nodeLevelResults", default=None + ) + raw_results: Optional["HcRawResultsConfigInput"] = Field( + 
alias="rawResults", default=None + ) + solve: Optional["HcSolveConfigInput"] = None + + +class HcInverterControlConfigInput(BaseModel): + after_cut_off_profile: Optional[str] = Field( + alias="afterCutOffProfile", default=None + ) + before_cut_off_profile: Optional[str] = Field( + alias="beforeCutOffProfile", default=None + ) + cut_off_date: Optional[Any] = Field(alias="cutOffDate", default=None) + + +class HcMeterPlacementConfigInput(BaseModel): + dist_transformers: Optional[bool] = Field(alias="distTransformers", default=None) + energy_consumer_meter_group: Optional[str] = Field( + alias="energyConsumerMeterGroup", default=None + ) + feeder_head: Optional[bool] = Field(alias="feederHead", default=None) + switch_meter_placement_configs: Optional[ + list["HcSwitchMeterPlacementConfigInput"] + ] = Field(alias="switchMeterPlacementConfigs", default=None) + + +class HcMetricsResultsConfigInput(BaseModel): + calculate_performance_metrics: Optional[bool] = Field( + alias="calculatePerformanceMetrics", default=None + ) + + +class HcModelConfigInput(BaseModel): + calibration: Optional[bool] = None + closed_loop_time_delay: Optional[int] = Field( + alias="closedLoopTimeDelay", default=None + ) + closed_loop_v_band: Optional[float] = Field(alias="closedLoopVBand", default=None) + closed_loop_v_limit: Optional[float] = Field(alias="closedLoopVLimit", default=None) + closed_loop_v_reg_enabled: Optional[bool] = Field( + alias="closedLoopVRegEnabled", default=None + ) + closed_loop_v_reg_replace_all: Optional[bool] = Field( + alias="closedLoopVRegReplaceAll", default=None + ) + closed_loop_v_reg_set_point: Optional[float] = Field( + alias="closedLoopVRegSetPoint", default=None + ) + collapse_lv_networks: Optional[bool] = Field( + alias="collapseLvNetworks", default=None + ) + collapse_negligible_impedances: Optional[bool] = Field( + alias="collapseNegligibleImpedances", default=None + ) + collapse_swer: Optional[bool] = Field(alias="collapseSWER", default=None) + 
combine_common_impedances: Optional[bool] = Field( + alias="combineCommonImpedances", default=None + ) + ct_prim_scaling_factor: Optional[float] = Field( + alias="ctPrimScalingFactor", default=None + ) + default_gen_var: Optional[list[float]] = Field(alias="defaultGenVar", default=None) + default_gen_watts: Optional[list[float]] = Field( + alias="defaultGenWatts", default=None + ) + default_load_var: Optional[list[float]] = Field( + alias="defaultLoadVar", default=None + ) + default_load_watts: Optional[list[float]] = Field( + alias="defaultLoadWatts", default=None + ) + default_tap_changer_band: Optional[float] = Field( + alias="defaultTapChangerBand", default=None + ) + default_tap_changer_set_point_pu: Optional[float] = Field( + alias="defaultTapChangerSetPointPu", default=None + ) + default_tap_changer_time_delay: Optional[int] = Field( + alias="defaultTapChangerTimeDelay", default=None + ) + emerg_amp_scaling: Optional[float] = Field(alias="emergAmpScaling", default=None) + feeder_scenario_allocation_strategy: Optional[ + HcFeederScenarioAllocationStrategy + ] = Field(alias="feederScenarioAllocationStrategy", default=None) + fix_overloading_consumers: Optional[bool] = Field( + alias="fixOverloadingConsumers", default=None + ) + fix_single_phase_loads: Optional[bool] = Field( + alias="fixSinglePhaseLoads", default=None + ) + fix_undersized_service_lines: Optional[bool] = Field( + alias="fixUndersizedServiceLines", default=None + ) + gen_v_max_pu: Optional[float] = Field(alias="genVMaxPu", default=None) + gen_v_min_pu: Optional[float] = Field(alias="genVMinPu", default=None) + inverter_control_config: Optional["HcInverterControlConfigInput"] = Field( + alias="inverterControlConfig", default=None + ) + load_interval_length_hours: Optional[float] = Field( + alias="loadIntervalLengthHours", default=None + ) + load_model: Optional[int] = Field(alias="loadModel", default=None) + load_placement: Optional[HcLoadPlacement] = Field( + alias="loadPlacement", default=None 
+ ) + load_v_max_pu: Optional[float] = Field(alias="loadVMaxPu", default=None) + load_v_min_pu: Optional[float] = Field(alias="loadVMinPu", default=None) + max_gen_tx_ratio: Optional[float] = Field(alias="maxGenTxRatio", default=None) + max_load_lv_line_ratio: Optional[float] = Field( + alias="maxLoadLvLineRatio", default=None + ) + max_load_service_line_ratio: Optional[float] = Field( + alias="maxLoadServiceLineRatio", default=None + ) + max_load_tx_ratio: Optional[float] = Field(alias="maxLoadTxRatio", default=None) + max_single_phase_load: Optional[float] = Field( + alias="maxSinglePhaseLoad", default=None + ) + meter_placement_config: Optional["HcMeterPlacementConfigInput"] = Field( + alias="meterPlacementConfig", default=None + ) + p_factor_base_exports: Optional[float] = Field( + alias="pFactorBaseExports", default=None + ) + p_factor_base_imports: Optional[float] = Field( + alias="pFactorBaseImports", default=None + ) + p_factor_forecast_pv: Optional[float] = Field( + alias="pFactorForecastPv", default=None + ) + rating_threshold: Optional[float] = Field(alias="ratingThreshold", default=None) + seed: Optional[int] = None + simplify_network: Optional[bool] = Field(alias="simplifyNetwork", default=None) + simplify_plsi_threshold: Optional[float] = Field( + alias="simplifyPLSIThreshold", default=None + ) + split_phase_default_load_loss_percentage: Optional[float] = Field( + alias="splitPhaseDefaultLoadLossPercentage", default=None + ) + split_phase_lvkv: Optional[float] = Field(alias="splitPhaseLVKV", default=None) + swer_voltage_to_line_voltage: Optional[list[list[int]]] = Field( + alias="swerVoltageToLineVoltage", default=None + ) + transformer_tap_settings: Optional[str] = Field( + alias="transformerTapSettings", default=None + ) + use_span_level_threshold: Optional[bool] = Field( + alias="useSpanLevelThreshold", default=None + ) + vm_pu: Optional[float] = Field(alias="vmPu", default=None) + + +class HcNodeLevelResultsConfigInput(BaseModel): + 
collect_all_conductors: Optional[bool] = Field( + alias="collectAllConductors", default=None + ) + collect_all_energy_consumers: Optional[bool] = Field( + alias="collectAllEnergyConsumers", default=None + ) + collect_all_switches: Optional[bool] = Field( + alias="collectAllSwitches", default=None + ) + collect_all_transformers: Optional[bool] = Field( + alias="collectAllTransformers", default=None + ) + collect_current: Optional[bool] = Field(alias="collectCurrent", default=None) + collect_power: Optional[bool] = Field(alias="collectPower", default=None) + collect_voltage: Optional[bool] = Field(alias="collectVoltage", default=None) + mrids_to_collect: Optional[list[str]] = Field(alias="mridsToCollect", default=None) + + +class HcRawResultsConfigInput(BaseModel): + energy_meter_voltages_raw: Optional[bool] = Field( + alias="energyMeterVoltagesRaw", default=None + ) + energy_meters_raw: Optional[bool] = Field(alias="energyMetersRaw", default=None) + overloads_raw: Optional[bool] = Field(alias="overloadsRaw", default=None) + results_per_meter: Optional[bool] = Field(alias="resultsPerMeter", default=None) + voltage_exceptions_raw: Optional[bool] = Field( + alias="voltageExceptionsRaw", default=None + ) + + +class HcResultProcessorConfigInput(BaseModel): + metrics: Optional["HcMetricsResultsConfigInput"] = None + stored_results: Optional["HcStoredResultsConfigInput"] = Field( + alias="storedResults", default=None + ) + writer_config: Optional["HcWriterConfigInput"] = Field( + alias="writerConfig", default=None + ) + + +class HcScenarioConfigsFilterInput(BaseModel): + id: Optional[str] = None + name: Optional[str] = None + + +class HcSolveConfigInput(BaseModel): + base_frequency: Optional[int] = Field(alias="baseFrequency", default=None) + emerg_v_max_pu: Optional[float] = Field(alias="emergVMaxPu", default=None) + emerg_v_min_pu: Optional[float] = Field(alias="emergVMinPu", default=None) + max_control_iter: Optional[int] = Field(alias="maxControlIter", default=None) + 
max_iter: Optional[int] = Field(alias="maxIter", default=None) + mode: Optional[HcSolveMode] = None + norm_v_max_pu: Optional[float] = Field(alias="normVMaxPu", default=None) + norm_v_min_pu: Optional[float] = Field(alias="normVMinPu", default=None) + step_size_minutes: Optional[int] = Field(alias="stepSizeMinutes", default=None) + voltage_bases: Optional[list[float]] = Field(alias="voltageBases", default=None) + + +class HcStoredResultsConfigInput(BaseModel): + energy_meter_voltages_raw: Optional[bool] = Field( + alias="energyMeterVoltagesRaw", default=None + ) + energy_meters_raw: Optional[bool] = Field(alias="energyMetersRaw", default=None) + overloads_raw: Optional[bool] = Field(alias="overloadsRaw", default=None) + voltage_exceptions_raw: Optional[bool] = Field( + alias="voltageExceptionsRaw", default=None + ) + + +class HcSwitchMeterPlacementConfigInput(BaseModel): + meter_switch_class: HcSwitchClass = Field(alias="meterSwitchClass") + name_pattern: str = Field(alias="namePattern") + + +class HcWorkPackagesFilterInput(BaseModel): + created_by: Optional[list[str]] = Field(alias="createdBy", default=None) + id: Optional[str] = None + name: Optional[str] = None + search_text: Optional[str] = Field(alias="searchText", default=None) + + +class HcWorkPackagesSortCriteriaInput(BaseModel): + created_at: Optional[SortOrder] = Field(alias="createdAt", default=None) + name: Optional[SortOrder] = None + + +class HcWriterConfigInput(BaseModel): + output_writer_config: Optional["HcWriterOutputConfigInput"] = Field( + alias="outputWriterConfig", default=None + ) + writer_type: Optional[HcWriterType] = Field(alias="writerType", default=None) + + +class HcWriterOutputConfigInput(BaseModel): + enhanced_metrics_config: Optional["HcEnhancedMetricsConfigInput"] = Field( + alias="enhancedMetricsConfig", default=None + ) + + +class IngestorConfigInput(BaseModel): + key: str + value: str + + +class IngestorRunsFilterInput(BaseModel): + completed: Optional[bool] = None + 
container_runtime_type: Optional[list[IngestorRuntimeKind]] = Field( + alias="containerRuntimeType", default=None + ) + id: Optional[str] = None + status: Optional[list[IngestorRunState]] = None + + +class IngestorRunsSortCriteriaInput(BaseModel): + completed_at: Optional[SortOrder] = Field(alias="completedAt", default=None) + container_runtime_type: Optional[SortOrder] = Field( + alias="containerRuntimeType", default=None + ) + started_at: Optional[SortOrder] = Field(alias="startedAt", default=None) + status: Optional[SortOrder] = None + status_last_updated_at: Optional[SortOrder] = Field( + alias="statusLastUpdatedAt", default=None + ) + + +class InterventionConfigInput(BaseModel): + allocation_criteria: Optional[str] = Field(alias="allocationCriteria", default=None) + allocation_limit_per_year: Optional[int] = Field( + alias="allocationLimitPerYear", default=None + ) + base_work_package_id: str = Field(alias="baseWorkPackageId") + candidate_generation: Optional["CandidateGenerationConfigInput"] = Field( + alias="candidateGeneration", default=None + ) + dvms: Optional["DvmsConfigInput"] = None + intervention_type: InterventionClass = Field(alias="interventionType") + phase_rebalance_proportions: Optional["PhaseRebalanceProportionsInput"] = Field( + alias="phaseRebalanceProportions", default=None + ) + specific_allocation_instance: Optional[str] = Field( + alias="specificAllocationInstance", default=None + ) + year_range: Optional["YearRangeInput"] = Field(alias="yearRange", default=None) + + +class OpenDssCommonConfigInput(BaseModel): + fixed_time: Optional["FixedTimeInput"] = Field(alias="fixedTime", default=None) + time_period: Optional["TimePeriodInput"] = Field(alias="timePeriod", default=None) + + +class OpenDssModelGenerationSpecInput(BaseModel): + model_options: "OpenDssModelOptionsInput" = Field(alias="modelOptions") + modules_configuration: "OpenDssModulesConfigInput" = Field( + alias="modulesConfiguration" + ) + + +class OpenDssModelInput(BaseModel): + 
generation_spec: "OpenDssModelGenerationSpecInput" = Field(alias="generationSpec") + is_public: Optional[bool] = Field(alias="isPublic", default=None) + model_name: Optional[str] = Field(alias="modelName", default=None) + + +class OpenDssModelOptionsInput(BaseModel): + feeder: str + scenario: str + year: int + + +class OpenDssModulesConfigInput(BaseModel): + common: "OpenDssCommonConfigInput" + generator: Optional["HcGeneratorConfigInput"] = None + + +class PhaseRebalanceProportionsInput(BaseModel): + a: float + b: float + c: float + + +class PowerFactoryModelGenerationSpecInput(BaseModel): + distribution_transformer_config: Optional[ + "GqlDistributionTransformerConfigInput" + ] = Field(alias="distributionTransformerConfig", default=None) + equipment_container_mrids: list[str] = Field(alias="equipmentContainerMrids") + load_config: Optional["GqlLoadConfigInput"] = Field( + alias="loadConfig", default=None + ) + model_name: Optional[str] = Field(alias="modelName", default=None) + scenario_config: Optional["GqlScenarioConfigInput"] = Field( + alias="scenarioConfig", default=None + ) + + +class PowerFactoryModelInput(BaseModel): + generation_spec: "PowerFactoryModelGenerationSpecInput" = Field( + alias="generationSpec" + ) + is_public: Optional[bool] = Field(alias="isPublic", default=None) + name: Optional[str] = None + + +class ProcessedDiffFilterInput(BaseModel): + type_: Optional[str] = Field(alias="type", default=None) + w_p_id: Optional[str] = Field(alias="wPId", default=None) + + +class ProcessedDiffSortCriteriaInput(BaseModel): + type_: Optional[SortOrder] = Field(alias="type", default=None) + work_packaged_id_1: Optional[SortOrder] = Field( + alias="workPackagedId1", default=None + ) + + +class ResultSectionInput(BaseModel): + columns: Any + data: Any + description: str + name: str + type_: SectionType = Field(alias="type") + + +class SincalModelGenerationSpecInput(BaseModel): + equipment_container_mrids: Optional[list[str]] = Field( + 
alias="equipmentContainerMrids", default=None + ) + forecast_spec: Optional["GqlSincalModelForecastSpecInput"] = Field( + alias="forecastSpec", default=None + ) + frontend_config: str = Field(alias="frontendConfig") + + +class SincalModelInput(BaseModel): + generation_spec: "SincalModelGenerationSpecInput" = Field(alias="generationSpec") + is_public: Optional[bool] = Field(alias="isPublic", default=None) + model_name: Optional[str] = Field(alias="modelName", default=None) + + +class StateOverlayInput(BaseModel): + data: Any + styles: list[str] + + +class StudyInput(BaseModel): + description: str + name: str + results: list["StudyResultInput"] + styles: list[Any] + tags: list[str] + + +class StudyResultInput(BaseModel): + geo_json_overlay: Optional["GeoJsonOverlayInput"] = Field( + alias="geoJsonOverlay", default=None + ) + name: str + sections: list["ResultSectionInput"] + state_overlay: Optional["StateOverlayInput"] = Field( + alias="stateOverlay", default=None + ) + + +class TimePeriodInput(BaseModel): + end_time: Any = Field(alias="endTime") + overrides: Optional[list["TimePeriodLoadOverrideInput"]] = None + start_time: Any = Field(alias="startTime") + + +class TimePeriodLoadOverrideInput(BaseModel): + gen_var_override: Optional[list[float]] = Field( + alias="genVarOverride", default=None + ) + gen_watts_override: Optional[list[float]] = Field( + alias="genWattsOverride", default=None + ) + load_id: str = Field(alias="loadId") + load_var_override: Optional[list[float]] = Field( + alias="loadVarOverride", default=None + ) + load_watts_override: Optional[list[float]] = Field( + alias="loadWattsOverride", default=None + ) + + +class WorkPackageInput(BaseModel): + executor_config: Optional["HcExecutorConfigInput"] = Field( + alias="executorConfig", default=None + ) + feeder_configs: Optional["FeederConfigsInput"] = Field( + alias="feederConfigs", default=None + ) + forecast_config: Optional["ForecastConfigInput"] = Field( + alias="forecastConfig", default=None + ) + 
generator_config: Optional["HcGeneratorConfigInput"] = Field( + alias="generatorConfig", default=None + ) + intervention: Optional["InterventionConfigInput"] = None + quality_assurance_processing: Optional[bool] = Field( + alias="qualityAssuranceProcessing", default=None + ) + result_processor_config: Optional["HcResultProcessorConfigInput"] = Field( + alias="resultProcessorConfig", default=None + ) + + +class YearRangeInput(BaseModel): + max_year: int = Field(alias="maxYear") + min_year: int = Field(alias="minYear") + + +DvmsConfigInput.model_rebuild() +FeederConfigInput.model_rebuild() +FeederConfigsInput.model_rebuild() +FeederLoadAnalysisInput.model_rebuild() +FixedTimeInput.model_rebuild() +ForecastConfigInput.model_rebuild() +HcGeneratorConfigInput.model_rebuild() +HcMeterPlacementConfigInput.model_rebuild() +HcModelConfigInput.model_rebuild() +HcResultProcessorConfigInput.model_rebuild() +HcWriterConfigInput.model_rebuild() +HcWriterOutputConfigInput.model_rebuild() +InterventionConfigInput.model_rebuild() +OpenDssCommonConfigInput.model_rebuild() +OpenDssModelGenerationSpecInput.model_rebuild() +OpenDssModelInput.model_rebuild() +OpenDssModulesConfigInput.model_rebuild() +PowerFactoryModelGenerationSpecInput.model_rebuild() +PowerFactoryModelInput.model_rebuild() +SincalModelGenerationSpecInput.model_rebuild() +SincalModelInput.model_rebuild() +StudyInput.model_rebuild() +StudyResultInput.model_rebuild() +TimePeriodInput.model_rebuild() +WorkPackageInput.model_rebuild() diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 81b7a2f..91b69e5 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -9,27 +9,24 @@ import string from datetime import datetime from http import HTTPStatus -from unittest import mock +import httpx import pytest import trustme from pytest_httpserver import HTTPServer from werkzeug import Response -from zepben.ewb.auth import ZepbenTokenFetcher - -from zepben.eas import EasClient, Study, SolveConfig, 
InterventionConfig, YearRange, CandidateGenerationConfig, CandidateGenerationType -from zepben.eas import FeederConfig, ForecastConfig, FixedTimeLoadOverride -from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsSortCriteriaInput, IngestorRunsFilterInput, \ - IngestorRunState, IngestorRuntimeKind -from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, OpenDssModelState, \ - GetOpenDssModelsSortCriteriaInput, \ - Order -from zepben.eas.client.study import Result -from zepben.eas.client.work_package import FeederConfigs, TimePeriodLoadOverride, \ - FixedTime, NodeLevelResultsConfig, PVVoltVARVoltWattConfig, InterventionClass -from zepben.eas.client.work_package import WorkPackageConfig, TimePeriod, GeneratorConfig, ModelConfig, \ - FeederScenarioAllocationStrategy, LoadPlacement, MeterPlacementConfig, SwitchMeterPlacementConfig, SwitchClass, \ - SolveMode, RawResultsConfig + +from zepben.eas import EasClient +from zepben.eas.client.enums import OpenDssModelState +from zepben.eas.lib.generated_graphql_client import WorkPackageInput, ForecastConfigInput, TimePeriodInput, \ + FeederConfigInput, FeederConfigsInput, FixedTimeInput, FixedTimeLoadOverrideInput, TimePeriodLoadOverrideInput, \ + StudyInput, StudyResultInput, InterventionConfigInput, YearRangeInput, InterventionClass, \ + CandidateGenerationConfigInput, CandidateGenerationType, HcGeneratorConfigInput, HcModelConfigInput, \ + HcSolveConfigInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput, SortOrder, IngestorConfigInput, \ + IngestorRunsFilterInput, IngestorRunState, IngestorRuntimeKind, IngestorRunsSortCriteriaInput, OpenDssModelInput, \ + OpenDssModelGenerationSpecInput, OpenDssModelOptionsInput, OpenDssModulesConfigInput, OpenDssCommonConfigInput, \ + HcFeederScenarioAllocationStrategy, HcLoadPlacement, HcMeterPlacementConfigInput, HcSwitchMeterPlacementConfigInput, \ + HcSwitchClass, HcInverterControlConfigInput, HcSolveMode, 
HcRawResultsConfigInput, HcNodeLevelResultsConfigInput mock_host = ''.join(random.choices(string.ascii_lowercase, k=10)) mock_port = random.randrange(1024) @@ -72,20 +69,6 @@ def test_create_eas_client_success(): assert eas_client._host == mock_host assert eas_client._port == mock_port assert eas_client._protocol == mock_protocol - assert eas_client._verify_certificate == mock_verify_certificate - - -def test_create_eas_client_with_access_token_success(): - eas_client = EasClient( - mock_host, - mock_port, - access_token=mock_access_token, - ) - - assert eas_client is not None - assert eas_client._host == mock_host - assert eas_client._port == mock_port - assert eas_client._access_token == mock_access_token def test_get_request_headers_adds_access_token_in_auth_header(): @@ -95,7 +78,7 @@ def test_get_request_headers_adds_access_token_in_auth_header(): access_token=mock_access_token, ) - headers = eas_client._get_request_headers() + headers = eas_client._gql_client.http_client.headers assert headers["authorization"] == f"Bearer {mock_access_token}" @@ -131,16 +114,16 @@ def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServ httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"getWorkPackageCostEstimation": "123.45"}}) res = eas_client.get_work_package_cost_estimation( - WorkPackageConfig( - "wp_name", - ForecastConfig( - ["feeder"], - [1], - ["scenario"], - TimePeriod( - datetime(2022, 1, 1, 10), - datetime(2022, 1, 2, 12), - None + WorkPackageInput( + #"wp_name", + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1, 10), + endTime=datetime(2022, 1, 2, 12), + overrides=None ) ) ) @@ -160,18 +143,18 @@ def test_get_work_package_cost_estimation_invalid_certificate_failure(ca: trustm httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"getWorkPackageCostEstimation": "123.45"}}) - 
with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.get_work_package_cost_estimation( - WorkPackageConfig( - "wp_name", - ForecastConfig( - ["feeder"], - [1], - ["scenario"], - TimePeriod( - datetime(2022, 1, 1), - datetime(2022, 1, 2), - None + WorkPackageInput( + #"wp_name", + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1, 10), + endTime=datetime(2022, 1, 2, 12), + overrides=None ) ) ) @@ -190,21 +173,31 @@ def test_get_work_package_cost_estimation_valid_certificate_success(ca: trustme. httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"getWorkPackageCostEstimation": "123.45"}}) res = eas_client.get_work_package_cost_estimation( - WorkPackageConfig( - "wp_name", - FeederConfigs( - [FeederConfig( - "feeder", - [1], - ["scenario"], - FixedTime( - datetime(2022, 1, 1), - {"meter": FixedTimeLoadOverride(1, 2, 3, 4)} + WorkPackageInput( + feederConfigs=FeederConfigsInput( + configs=[ + FeederConfigInput( + feeder="feeder", + years=[1], + scenarios=["scenario"], + fixedTime=FixedTimeInput( + loadTime=datetime(2022, 1, 1), + overrides=[ + FixedTimeLoadOverrideInput( + loadId="meter", + genVarOverride=[1], + genWattsOverride=[2], + loadVarOverride=[3], + loadWattsOverride=[4] + ) + ] + ) ) - )] + ] ) ) ) + httpserver.check_assertions() assert res == {"data": {"getWorkPackageCostEstimation": "123.45"}} @@ -218,19 +211,18 @@ def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPSer httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"data": {"runWorkPackage": "workPackageId"}}) res = eas_client.run_hosting_capacity_work_package( - WorkPackageConfig( - "wp_name", - ForecastConfig( - ["feeder"], - [1], - ["scenario"], - TimePeriod( - datetime(2022, 1, 1), - datetime(2022, 1, 2), - None + WorkPackageInput( + forecastConfig=ForecastConfigInput( + 
feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1), + endTime=datetime(2022, 1, 2), + overrides=None ) ) - ) + ), work_package_name="wp_name", ) httpserver.check_assertions() assert res == {"data": {"runWorkPackage": "workPackageId"}} @@ -247,21 +239,20 @@ def test_run_hosting_capacity_work_package_invalid_certificate_failure(ca: trust httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"runWorkPackage": "workPackageId"}}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.run_hosting_capacity_work_package( - WorkPackageConfig( - "wp_name", - ForecastConfig( - ["feeder"], - [1], - ["scenario"], - TimePeriod( - datetime(2022, 1, 1), - datetime(2022, 1, 2), - None + WorkPackageInput( + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1), + endTime=datetime(2022, 1, 2), + overrides=None ) ) - ) + ), work_package_name="wp_name", ) @@ -277,19 +268,26 @@ def test_run_hosting_capacity_work_package_valid_certificate_success(ca: trustme httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"runWorkPackage": "workPackageId"}}) res = eas_client.run_hosting_capacity_work_package( - WorkPackageConfig( - "wp_name", - ForecastConfig( - ["feeder"], - [1], - ["scenario"], - TimePeriod( - datetime(2022, 1, 1), - datetime(2022, 1, 2), - {"meter1": TimePeriodLoadOverride([1.0], [2.0], [3.0], [4.0])} + WorkPackageInput( + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1), + endTime=datetime(2022, 1, 2), + overrides=[ + TimePeriodLoadOverrideInput( + loadId="meter1", + loadWattsOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + genVarOverride=[4.0] + ) + ] ) ) - ) + ), work_package_name="wp_name", 
) httpserver.check_assertions() assert res == {"data": {"runWorkPackage": "workPackageId"}} @@ -321,7 +319,7 @@ def test_cancel_hosting_capacity_work_package_invalid_certificate_failure(ca: tr httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"cancelWorkPackage": "workPackageId"}}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.cancel_hosting_capacity_work_package("workPackageId") @@ -367,7 +365,7 @@ def test_get_hosting_capacity_work_package_progress_invalid_certificate_failure( httpserver.expect_oneshot_request("/api/graphql").respond_with_json( {"data": {"getWorkPackageProgress": {}}}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.get_hosting_capacity_work_packages_progress() @@ -395,7 +393,15 @@ def test_upload_study_no_verify_success(httpserver: HTTPServer): ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - res = eas_client.upload_study(Study("Test study", "description", ["tag"], [Result("Huge success")], [])) + res = eas_client.upload_study( + StudyInput( + name="Test study", + description="description", + tags=["tag"], + results=[StudyResultInput(name="Huge success", sections=[])], + styles=[] + ) + ) httpserver.check_assertions() assert res == {"result": "success"} @@ -410,8 +416,8 @@ def test_upload_study_invalid_certificate_failure(ca: trustme.CA, httpserver: HT ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(ssl.SSLError): - eas_client.upload_study(Study("Test study", "description", ["tag"], [Result("Huge success")], [])) + with pytest.raises(httpx.ConnectError): + eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) def test_upload_study_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): @@ -424,7 
+430,7 @@ def test_upload_study_valid_certificate_success(ca: trustme.CA, httpserver: HTTP ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - res = eas_client.upload_study(Study("Test study", "description", ["tag"], [Result("Huge success")], [])) + res = eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) httpserver.check_assertions() assert res == {"result": "success"} @@ -433,10 +439,9 @@ def hosting_capacity_run_calibration_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) }" - assert actual_body['variables'] == {"calibrationName": "TEST CALIBRATION", - "calibrationTimeLocal": datetime(2025, month=7, day=12).isoformat(), - "feeders": None, 'generatorConfig': None + assert query == "mutation runCalibration($calibrationName_0: String!, $calibrationTimeLocal_0: LocalDateTime) { runCalibration( calibrationName: $calibrationName_0 calibrationTimeLocal: $calibrationTimeLocal_0 ) }" + assert actual_body['variables'] == {"calibrationName_0": "TEST CALIBRATION", + "calibrationTimeLocal_0": datetime(2025, month=7, day=12).isoformat(), } return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -466,7 +471,7 @@ def test_run_hosting_capacity_calibration_invalid_certificate_failure(ca: trustm ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): 
eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(2025, month=7, day=12)) @@ -490,8 +495,8 @@ def get_hosting_capacity_run_calibration_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "query getCalibrationRun($id: ID!) { getCalibrationRun(id: $id) { id name workflowId runId calibrationTimeLocal startAt completedAt status feeders calibrationWorkPackageConfig } }" - assert actual_body['variables'] == {"id": "calibration-id"} + assert query == "query getCalibrationRun($id_0: ID!) { getCalibrationRun(id: $id_0) }" + assert actual_body['variables'] == {"id_0": "calibration-id"} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -520,7 +525,7 @@ def test_get_hosting_capacity_calibration_run_invalid_certificate_failure(ca: tr ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.get_hosting_capacity_calibration_run("calibration-id") @@ -544,68 +549,17 @@ def hosting_capacity_run_calibration_with_calibration_time_request_handler(reque actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) }" - assert actual_body['variables'] == {"calibrationName": "TEST CALIBRATION", - "calibrationTimeLocal": datetime(1902, month=1, day=28, hour=0, minute=0, + assert query == "mutation runCalibration($calibrationName_0: String!, $calibrationTimeLocal_0: LocalDateTime, $feeders_0: [String!], $generatorConfig_0: HcGeneratorConfigInput) { runCalibration( 
calibrationName: $calibrationName_0 calibrationTimeLocal: $calibrationTimeLocal_0 feeders: $feeders_0 generatorConfig: $generatorConfig_0 ) }" + assert actual_body['variables'] == {"calibrationName_0": "TEST CALIBRATION", + "calibrationTimeLocal_0": datetime(1902, month=1, day=28, hour=0, minute=0, second=20).isoformat(), - "feeders": ["one", "two"], - "generatorConfig": { + "feeders_0": ["one", "two"], + "generatorConfig_0": { 'model': { - 'calibration': None, - 'closedLoopTimeDelay': None, - 'closedLoopVBand': None, - 'closedLoopVLimit': None, - 'closedLoopVRegEnabled': None, - 'closedLoopVRegReplaceAll': None, - 'closedLoopVRegSetPoint': None, - 'collapseLvNetworks': None, - 'collapseNegligibleImpedances': None, - 'collapseSWER': None, - 'combineCommonImpedances': None, - 'ctPrimScalingFactor': None, - 'defaultGenVar': None, - 'defaultGenWatts': None, - 'defaultLoadVar': None, - 'defaultLoadWatts': None, - 'defaultTapChangerBand': None, - 'defaultTapChangerSetPointPu': None, - 'defaultTapChangerTimeDelay': None, - 'feederScenarioAllocationStrategy': None, - 'fixOverloadingConsumers': None, - 'fixSinglePhaseLoads': None, - 'fixUndersizedServiceLines': None, - 'genVMaxPu': None, - 'genVMinPu': None, - 'inverterControlConfig': None, - 'loadIntervalLengthHours': None, - 'loadModel': None, - 'loadPlacement': None, - 'loadVMaxPu': None, - 'loadVMinPu': None, - 'maxGenTxRatio': None, - 'maxLoadLvLineRatio': None, - 'maxLoadServiceLineRatio': None, - 'maxLoadTxRatio': None, - 'maxSinglePhaseLoad': None, - 'meterPlacementConfig': None, - 'pFactorBaseExports': None, - 'pFactorBaseImports': None, - 'pFactorForecastPv': None, - 'seed': None, - 'simplifyNetwork': None, - 'useSpanLevelThreshold': False, - 'ratingThreshold': None, - 'simplifyPLSIThreshold': None, - 'emergAmpScaling': None, - 'splitPhaseDefaultLoadLossPercentage': None, - 'splitPhaseLVKV': None, - 'swerVoltageToLineVoltage': None, - 'transformerTapSettings': 'test_tap_settings', - 'vmPu': None}, - 'rawResults': 
None, - 'nodeLevelResults': None, - 'solve': None} + 'transformerTapSettings': 'test_tap_settings' + }, } + } return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -622,8 +576,8 @@ def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_succes res = eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(1902, month=1, day=28, hour=0, minute=0, second=20), ["one", "two"], - generator_config=GeneratorConfig(model=ModelConfig( - transformer_tap_settings="test_tap_settings" + generator_config=HcGeneratorConfigInput(model=HcModelConfigInput( + transformerTapSettings="test_tap_settings" )) ) httpserver.check_assertions() @@ -653,78 +607,17 @@ def hosting_capacity_run_calibration_with_generator_config_request_handler(reque actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) }" - assert actual_body['variables'] == {"calibrationName": "TEST CALIBRATION", - "calibrationTimeLocal": datetime(1902, month=1, day=28, hour=0, minute=0, + assert query == "mutation runCalibration($calibrationName_0: String!, $calibrationTimeLocal_0: LocalDateTime, $feeders_0: [String!], $generatorConfig_0: HcGeneratorConfigInput) { runCalibration( calibrationName: $calibrationName_0 calibrationTimeLocal: $calibrationTimeLocal_0 feeders: $feeders_0 generatorConfig: $generatorConfig_0 ) }" + assert actual_body['variables'] == {"calibrationName_0": "TEST CALIBRATION", + "calibrationTimeLocal_0": datetime(1902, month=1, day=28, hour=0, minute=0, second=20).isoformat(), - "feeders": ["one", "two"], - "generatorConfig": { + "feeders_0": ["one", "two"], + 
"generatorConfig_0": { 'model': { - 'calibration': None, - 'closedLoopTimeDelay': None, - 'closedLoopVBand': None, - 'closedLoopVLimit': None, - 'closedLoopVRegEnabled': None, - 'closedLoopVRegReplaceAll': None, - 'closedLoopVRegSetPoint': None, - 'collapseLvNetworks': None, - 'collapseNegligibleImpedances': None, - 'collapseSWER': None, - 'combineCommonImpedances': None, - 'ctPrimScalingFactor': None, - 'defaultGenVar': None, - 'defaultGenWatts': None, - 'defaultLoadVar': None, - 'defaultLoadWatts': None, - 'defaultTapChangerBand': None, - 'defaultTapChangerSetPointPu': None, - 'defaultTapChangerTimeDelay': None, - 'feederScenarioAllocationStrategy': None, - 'fixOverloadingConsumers': None, - 'fixSinglePhaseLoads': None, - 'fixUndersizedServiceLines': None, - 'genVMaxPu': None, - 'genVMinPu': None, - 'inverterControlConfig': None, - 'loadIntervalLengthHours': None, - 'loadModel': None, - 'loadPlacement': None, - 'loadVMaxPu': None, - 'loadVMinPu': None, - 'maxGenTxRatio': None, - 'maxLoadLvLineRatio': None, - 'maxLoadServiceLineRatio': None, - 'maxLoadTxRatio': None, - 'maxSinglePhaseLoad': None, - 'meterPlacementConfig': None, - 'pFactorBaseExports': None, - 'pFactorBaseImports': None, - 'pFactorForecastPv': None, - 'seed': None, - 'simplifyNetwork': None, - 'useSpanLevelThreshold': False, - 'ratingThreshold': None, - 'simplifyPLSIThreshold': None, - 'emergAmpScaling': None, - 'splitPhaseDefaultLoadLossPercentage': None, - 'splitPhaseLVKV': None, - 'swerVoltageToLineVoltage': None, 'transformerTapSettings': 'test_tap_settings', - 'vmPu': None }, - 'nodeLevelResults': None, - 'rawResults': None, 'solve': { - 'baseFrequency': None, - 'emergVMaxPu': None, - 'emergVMinPu': None, - 'maxControlIter': None, - 'maxIter': None, - 'mode': None, 'normVMaxPu': 23.9, - 'normVMinPu': None, - 'stepSizeMinutes': None, - 'voltageBases': None } } } @@ -746,8 +639,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings datetime(1902, month=1, day=28, 
hour=0, minute=0, second=20), ["one", "two"], transformer_tap_settings="test_tap_settings", - generator_config=GeneratorConfig( - solve=SolveConfig(norm_vmax_pu=23.9)) + generator_config=HcGeneratorConfigInput( + solve=HcSolveConfigInput(normVMaxPu=23.9)) ) httpserver.check_assertions() assert res == {"result": "success"} @@ -757,68 +650,16 @@ def hosting_capacity_run_calibration_with_partial_model_config_request_handler(r actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) }" - assert actual_body['variables'] == {"calibrationName": "TEST CALIBRATION", - "calibrationTimeLocal": datetime(1902, month=1, day=28, hour=0, minute=0, + assert query == "mutation runCalibration($calibrationName_0: String!, $calibrationTimeLocal_0: LocalDateTime, $feeders_0: [String!], $generatorConfig_0: HcGeneratorConfigInput) { runCalibration( calibrationName: $calibrationName_0 calibrationTimeLocal: $calibrationTimeLocal_0 feeders: $feeders_0 generatorConfig: $generatorConfig_0 ) }" + assert actual_body['variables'] == {"calibrationName_0": "TEST CALIBRATION", + "calibrationTimeLocal_0": datetime(1902, month=1, day=28, hour=0, minute=0, second=20).isoformat(), - "feeders": ["one", "two"], - "generatorConfig": { + "feeders_0": ["one", "two"], + "generatorConfig_0": { 'model': { - 'calibration': None, - 'closedLoopTimeDelay': None, - 'closedLoopVBand': None, - 'closedLoopVLimit': None, - 'closedLoopVRegEnabled': None, - 'closedLoopVRegReplaceAll': None, - 'closedLoopVRegSetPoint': None, - 'collapseLvNetworks': None, - 'collapseNegligibleImpedances': None, - 'collapseSWER': None, - 'combineCommonImpedances': None, - 
'ctPrimScalingFactor': None, - 'defaultGenVar': None, - 'defaultGenWatts': None, - 'defaultLoadVar': None, - 'defaultLoadWatts': None, - 'defaultTapChangerBand': None, - 'defaultTapChangerSetPointPu': None, - 'defaultTapChangerTimeDelay': None, - 'feederScenarioAllocationStrategy': None, - 'fixOverloadingConsumers': None, - 'fixSinglePhaseLoads': None, - 'fixUndersizedServiceLines': None, - 'genVMaxPu': None, - 'genVMinPu': None, - 'inverterControlConfig': None, - 'loadIntervalLengthHours': None, - 'loadModel': None, - 'loadPlacement': None, - 'loadVMaxPu': None, - 'loadVMinPu': None, - 'maxGenTxRatio': None, - 'maxLoadLvLineRatio': None, - 'maxLoadServiceLineRatio': None, - 'maxLoadTxRatio': None, - 'maxSinglePhaseLoad': None, - 'meterPlacementConfig': None, - 'pFactorBaseExports': None, - 'pFactorBaseImports': None, - 'pFactorForecastPv': None, - 'seed': None, - 'simplifyNetwork': None, - 'useSpanLevelThreshold': False, - 'ratingThreshold': None, - 'simplifyPLSIThreshold': None, - 'emergAmpScaling': None, - 'splitPhaseDefaultLoadLossPercentage': None, - 'splitPhaseLVKV': None, - 'swerVoltageToLineVoltage': None, 'transformerTapSettings': 'test_tap_settings', 'vmPu': 123.4 }, - 'nodeLevelResults': None, - 'rawResults': None, - 'solve': None } } @@ -839,7 +680,7 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings datetime(1902, month=1, day=28, hour=0, minute=0, second=20), ["one", "two"], transformer_tap_settings="test_tap_settings", - generator_config=GeneratorConfig(model=ModelConfig(vm_pu=123.4)) + generator_config=HcGeneratorConfigInput(model=HcModelConfigInput(vmPu=123.4)) ) httpserver.check_assertions() assert res == {"result": "success"} @@ -858,8 +699,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings datetime(1902, month=1, day=28, hour=0, minute=0, second=20), ["one", "two"], transformer_tap_settings="test_tap_settings", - generator_config=GeneratorConfig(model=ModelConfig( - 
transformer_tap_settings="this_should_be_over_written" + generator_config=HcGeneratorConfigInput(model=HcModelConfigInput( + transformerTapSettings="this_should_be_over_written" )) ) httpserver.check_assertions() @@ -868,11 +709,9 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings def get_hosting_capacity_calibration_sets_request_handler(request): actual_body = json.loads(request.data.decode()) - query = " ".join(actual_body['query'].split()) + query = actual_body['query'].replace('\n', '') - assert query == "query { getCalibrationSets }" - - assert "variables" not in actual_body + assert query == "query getCalibrationSets { getCalibrationSets}" return Response(json.dumps(["one", "two", "three"]), status=200, content_type="application/json") @@ -895,9 +734,9 @@ def run_opendss_export_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation createOpenDssModel($input: OpenDssModelInput!) { createOpenDssModel(input: $input) }" + assert query == "mutation createOpenDssModel($input_0: OpenDssModelInput!) 
{ createOpenDssModel(input: $input_0) }" assert actual_body['variables'] == { - "input": { + "input_0": { "modelName": "TEST OPENDSS MODEL 1", "isPublic": True, "generationSpec": { @@ -908,27 +747,27 @@ def run_opendss_export_request_handler(request): }, "modulesConfiguration": { "common": { - **({"fixedTime": { + "fixedTime": { "loadTime": "2022-04-01T00:00:00", "overrides": [{ 'loadId': 'meter1', - 'loadWattsOverride': [1.0], + 'loadWattsOverride': [4.0], 'genWattsOverride': [2.0], 'loadVarOverride': [3.0], - 'genVarOverride': [4.0] + 'genVarOverride': [1.0] }] - }} if isinstance(OPENDSS_CONFIG.load_time, FixedTime) else - {"timePeriod": { + }, + "timePeriod": { "startTime": "2022-04-01T10:13:00", "endTime": "2023-04-01T12:14:00", "overrides": [{ 'loadId': 'meter1', - 'loadWattsOverride': [1.0], + 'loadWattsOverride': [4.0], 'genWattsOverride': [2.0], 'loadVarOverride': [3.0], - 'genVarOverride': [4.0] - }] - }}) + 'genVarOverride': [1.0] + }] + } }, "generator": { "model": { @@ -1043,118 +882,153 @@ def run_opendss_export_request_handler(request): return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") +OPENDSS_CONFIG = OpenDssModelInput( + modelName="TEST OPENDSS MODEL 1", + isPublic=True, + generationSpec=OpenDssModelGenerationSpecInput( + modelOptions=OpenDssModelOptionsInput( + scenario="scenario1", + year=2024, + feeder="feeder1", + ), + modulesConfiguration=OpenDssModulesConfigInput( + common=OpenDssCommonConfigInput( + fixedTime= FixedTimeInput( + loadTime=datetime(2022, 4, 1), + overrides=[ + FixedTimeLoadOverrideInput( + loadId="meter1", + genVarOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + loadWattsOverride=[4.0] + ) + ] + ), + timePeriod=TimePeriodInput( + startTime=datetime(2022, 4, 1, 10, 13), + endTime=datetime(2023, 4, 1, 12, 14), + overrides=[ + TimePeriodLoadOverrideInput( + loadId="meter1", + genVarOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + 
loadWattsOverride=[4.0] + ) + ] + ), + ), + generator=HcGeneratorConfigInput( + model=HcModelConfigInput( + vmPu=1.0, + loadVMinPu=0.80, + loadVMaxPu=1.15, + genVMinPu=0.50, + genVMaxPu=2.00, + loadModel=1, + collapse_swer=False, + calibration=False, + p_factor_base_exports=0.95, + p_factor_base_imports=0.90, + p_factor_forecast_pv=1.0, + fix_single_phase_loads=True, + max_single_phase_load=30000.0, + fix_overloading_consumers=True, + max_load_tx_ratio=3.0, + max_gen_tx_ratio=10.0, + fix_undersized_service_lines=True, + max_load_service_line_ratio=1.5, + max_load_lv_line_ratio=2.0, + simplify_network=False, + collapse_lv_networks=False, + collapse_negligible_impedances=False, + combine_common_impedances=False, + feeder_scenario_allocation_strategy=HcFeederScenarioAllocationStrategy.ADDITIVE, + closed_loop_v_reg_enabled=True, + closed_loop_v_reg_replace_all=True, + closed_loop_v_reg_set_point=0.985, + closed_loop_v_band=2.0, + closed_loop_time_delay=100, + closed_loop_v_limit=1.1, + default_tap_changer_time_delay=100, + default_tap_changer_set_point_pu=1.0, + default_tap_changer_band=2.0, + split_phase_default_load_loss_percentage=0.4, + splitPhaseLVKV=0.25, + swer_voltage_to_line_voltage=[ + [230, 400], + [240, 415], + [250, 433], + [6350, 11000], + [6400, 11000], + [12700, 22000], + [19100, 33000] + ], + load_placement=HcLoadPlacement.PER_USAGE_POINT, + loadIntervalLengthHours=0.5, + meter_placement_config=HcMeterPlacementConfigInput( + feederHead=True, + distTransformers=True, + switchMeterPlacementConfigs=[ + HcSwitchMeterPlacementConfigInput( + meterSwitchClass=HcSwitchClass.LOAD_BREAK_SWITCH, + namePattern=".*" + ) + ], + energyConsumerMeterGroup="meter group 1" + ), + seed=42, + default_load_watts=[100.0, 200.0, 300.0], + default_gen_watts=[50.0, 150.0, 250.0], + default_load_var=[10.0, 20.0, 30.0], + default_gen_var=[5.0, 15.0, 25.0], + transformer_tap_settings="tap-3", + ct_prim_scaling_factor=2.0, + use_span_level_threshold=True, + rating_threshold=20.0, + 
simplify_plsi_threshold=20.0, + emerg_amp_scaling=1.8, + inverter_control_config=HcInverterControlConfigInput( + cut_off_date=datetime(2024, 4, 12, 11, 42), + before_cut_off_profile="beforeProfile", + after_cut_off_profile="afterProfile" + ) + ), + solve=HcSolveConfigInput( + normVMinPu=0.9, + normVMaxPu=1.054, + emergVMinPu=0.8, + emergVMaxPu=1.1, + base_frequency=50, + voltage_bases=[0.4, 0.433, 6.6, 11.0, 22.0, 33.0, 66.0, 132.0], + max_iter=25, + max_control_iter=20, + mode=HcSolveMode.YEARLY, + step_size_minutes=60 + ), + rawResults=HcRawResultsConfigInput( + energy_meter_voltages_raw=True, + energy_meters_raw=True, + results_per_meter=True, + overloads_raw=True, + voltage_exceptions_raw=True + ), + nodeLevelResults=HcNodeLevelResultsConfigInput( + collect_voltage=True, + collect_current=False, + collect_power=True, + mrids_to_collect=["mrid_one", "mrid_two"], + collect_all_switches=False, + collect_all_transformers=True, + collect_all_conductors=False, + collect_all_energy_consumers=True + ) -OPENDSS_CONFIG = OpenDssConfig( - scenario="scenario1", - year=2024, - feeder="feeder1", - load_time=TimePeriod( - datetime(2022, 4, 1, 10, 13), - datetime(2023, 4, 1, 12, 14), - {"meter1": TimePeriodLoadOverride([1.0], [2.0], [3.0], [4.0])} - ), - model_name="TEST OPENDSS MODEL 1", - generator_config=GeneratorConfig( - ModelConfig( - vm_pu=1.0, - load_vmin_pu=0.80, - load_vmax_pu=1.15, - gen_vmin_pu=0.50, - gen_vmax_pu=2.00, - load_model=1, - collapse_swer=False, - calibration=False, - p_factor_base_exports=0.95, - p_factor_base_imports=0.90, - p_factor_forecast_pv=1.0, - fix_single_phase_loads=True, - max_single_phase_load=30000.0, - fix_overloading_consumers=True, - max_load_tx_ratio=3.0, - max_gen_tx_ratio=10.0, - fix_undersized_service_lines=True, - max_load_service_line_ratio=1.5, - max_load_lv_line_ratio=2.0, - simplify_network=False, - collapse_lv_networks=False, - collapse_negligible_impedances=False, - combine_common_impedances=False, - 
feeder_scenario_allocation_strategy=FeederScenarioAllocationStrategy.ADDITIVE, - closed_loop_v_reg_enabled=True, - closed_loop_v_reg_replace_all=True, - closed_loop_v_reg_set_point=0.985, - closed_loop_v_band=2.0, - closed_loop_time_delay=100, - closed_loop_v_limit=1.1, - default_tap_changer_time_delay=100, - default_tap_changer_set_point_pu=1.0, - default_tap_changer_band=2.0, - split_phase_default_load_loss_percentage=0.4, - split_phase_lv_kv=0.25, - swer_voltage_to_line_voltage=[ - [230, 400], - [240, 415], - [250, 433], - [6350, 11000], - [6400, 11000], - [12700, 22000], - [19100, 33000] - ], - load_placement=LoadPlacement.PER_USAGE_POINT, - load_interval_length_hours=0.5, - meter_placement_config=MeterPlacementConfig( - True, - True, - [SwitchMeterPlacementConfig(SwitchClass.LOAD_BREAK_SWITCH, ".*")], - "meter group 1"), - seed=42, - default_load_watts=[100.0, 200.0, 300.0], - default_gen_watts=[50.0, 150.0, 250.0], - default_load_var=[10.0, 20.0, 30.0], - default_gen_var=[5.0, 15.0, 25.0], - transformer_tap_settings="tap-3", - ct_prim_scaling_factor=2.0, - use_span_level_threshold=True, - rating_threshold=20.0, - simplify_plsi_threshold=20.0, - emerg_amp_scaling=1.8, - inverter_control_config=PVVoltVARVoltWattConfig( - cut_off_date=datetime(2024, 4, 12, 11, 42), - before_cut_off_profile="beforeProfile", - after_cut_off_profile="afterProfile" ) - ), - SolveConfig( - norm_vmin_pu=0.9, - norm_vmax_pu=1.054, - emerg_vmin_pu=0.8, - emerg_vmax_pu=1.1, - base_frequency=50, - voltage_bases=[0.4, 0.433, 6.6, 11.0, 22.0, 33.0, 66.0, 132.0], - max_iter=25, - max_control_iter=20, - mode=SolveMode.YEARLY, - step_size_minutes=60 - ), - RawResultsConfig( - energy_meter_voltages_raw=True, - energy_meters_raw=True, - results_per_meter=True, - overloads_raw=True, - voltage_exceptions_raw=True - ), - NodeLevelResultsConfig( - collect_voltage=True, - collect_current=False, - collect_power=True, - mrids_to_collect=["mrid_one", "mrid_two"], - collect_all_switches=False, - 
collect_all_transformers=True, - collect_all_conductors=False, - collect_all_energy_consumers=True ) ), - is_public=True) +) def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): @@ -1180,7 +1054,7 @@ def test_run_opendss_export_invalid_certificate_failure(ca: trustme.CA, httpserv ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.run_opendss_export(OPENDSS_CONFIG) @@ -1192,32 +1066,26 @@ def test_run_opendss_export_valid_certificate_success(ca: trustme.CA, httpserver verify_certificate=True, ca_filename=ca_filename ) - - OPENDSS_CONFIG.load_time = FixedTime(datetime(2022, 4, 1), - {"meter1": FixedTimeLoadOverride([1.0], [2.0], [3.0], [4.0])}) + dss_conf = OPENDSS_CONFIG.model_copy() + dss_conf.generation_spec.modules_configuration.common.fixed_time = FixedTimeInput(load_time=datetime(2022, 4, 1), + overrides=[ + FixedTimeLoadOverrideInput( + loadId="meter1", + genVarOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + loadWattsOverride=[4.0] + ) + ]) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(run_opendss_export_request_handler) - res = eas_client.run_opendss_export(OPENDSS_CONFIG) + res = eas_client.run_opendss_export(dss_conf) httpserver.check_assertions() assert res == {"result": "success"} get_paged_opendss_models_query = """ - query pagedOpenDssModels($limit: Int, $offset: Long, $filter: GetOpenDssModelsFilterInput, $sort: GetOpenDssModelsSortCriteriaInput) { - pagedOpenDssModels(limit: $limit, offset: $offset, filter: $filter,sort: $sort) { - totalCount - offset, - models { - id - name - createdAt - createdBy - state - downloadUrl - isPublic - errors - generationSpec - } - } + query pagedOpenDssModels($limit_0: Int, $offset_0: Long, $filter_0: GetOpenDssModelsFilterInput, $sort_0: GetOpenDssModelsSortCriteriaInput) { + pagedOpenDssModels( limit: $limit_0 offset: 
$offset_0 filter: $filter_0 sort: $sort_0 ) } """ @@ -1228,18 +1096,15 @@ def get_paged_opendss_models_request_handler(request): assert query == " ".join(line.strip() for line in get_paged_opendss_models_query.strip().splitlines()) assert actual_body['variables'] == { - "limit": 5, - "offset": 0, - "filter": { + "limit_0": 5, + "offset_0": 0, + "filter_0": { "name": "TEST OPENDSS MODEL 1", "isPublic": True, "state": ["COMPLETED"], }, - "sort": { + "sort_0": { "state": "ASC", - "createdAt": None, - "name": None, - "isPublic": None } } @@ -1256,8 +1121,8 @@ def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( get_paged_opendss_models_request_handler) res = eas_client.get_paged_opendss_models( - 5, 0, GetOpenDssModelsFilterInput("TEST OPENDSS MODEL 1", True, [OpenDssModelState.COMPLETED]), - GetOpenDssModelsSortCriteriaInput(state=Order.ASC)) + 5, 0, GetOpenDssModelsFilterInput(name="TEST OPENDSS MODEL 1", isPublic=True, state=[OpenDssModelState.COMPLETED.name]), + GetOpenDssModelsSortCriteriaInput(state=SortOrder.ASC)) httpserver.check_assertions() assert res == {"result": "success"} @@ -1272,7 +1137,7 @@ def test_get_paged_opendss_models_invalid_certificate_failure(ca: trustme.CA, ht ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.get_paged_opendss_models() @@ -1280,7 +1145,7 @@ def get_paged_opendss_models_no_param_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == " ".join(line.strip() for line in get_paged_opendss_models_query.strip().splitlines()) + assert query == 'query pagedOpenDssModels { pagedOpenDssModels }' assert actual_body['variables'] == {} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ 
-1331,7 +1196,7 @@ def test_get_opendss_model_download_url_invalid_certificate_failure(ca: trustme. status=HTTPStatus.FOUND, headers={"Location": "https://example.com/download/1"} )) - with pytest.raises(ssl.SSLError): + with pytest.raises(httpx.ConnectError): eas_client.get_opendss_model_download_url(1) @@ -1357,8 +1222,8 @@ def run_ingestor_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "mutation executeIngestor($runConfig: [IngestorConfigInput!]) { executeIngestor(runConfig: $runConfig) }" - assert actual_body['variables'] == {'runConfig': [{'key': 'random.config', 'value': 'random.value'}, + assert query == "mutation executeIngestor($runConfig_0: [IngestorConfigInput!]) { executeIngestor(runConfig: $runConfig_0) }" + assert actual_body['variables'] == {'runConfig_0': [{'key': 'random.config', 'value': 'random.value'}, {'key': 'dataStorePath', 'value': '/some/place/with/data'}]} return Response(json.dumps({"executeIngestor": 5}), status=200, content_type="application/json") @@ -1373,8 +1238,8 @@ def test_run_ingestor_no_verify_success(httpserver: HTTPServer): httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( run_ingestor_request_handler) - res = eas_client.run_ingestor([IngestorConfigInput("random.config", "random.value"), - IngestorConfigInput("dataStorePath", "/some/place/with/data")]) + res = eas_client.run_ingestor([IngestorConfigInput(key="random.config", value="random.value"), + IngestorConfigInput(key="dataStorePath", value="/some/place/with/data")]) httpserver.check_assertions() assert res == {"executeIngestor": 5} @@ -1383,8 +1248,8 @@ def get_ingestor_run_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "query getIngestorRun($id: Int!) 
{ getIngestorRun(id: $id) { id containerRuntimeType, payload, token, status, startedAt, statusLastUpdatedAt, completedAt } }" - assert actual_body['variables'] == {"id": 1} + assert query == "query getIngestorRun($id_0: Int!) { getIngestorRun(id: $id_0) }" + assert actual_body['variables'] == {"id_0": 1} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1406,20 +1271,7 @@ def get_ingestor_run_list_request_empty_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } - } - """ + get_ingestor_run_list_query = """query listIngestorRuns { listIngestorRuns }""" assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == {} @@ -1444,28 +1296,19 @@ def get_ingestor_run_list_request_complete_handler(request): query = " ".join(actual_body['query'].split()) get_ingestor_run_list_query = """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } + query listIngestorRuns($filter_0: IngestorRunsFilterInput, $sort_0: IngestorRunsSortCriteriaInput) { + listIngestorRuns(filter: $filter_0, sort: $sort_0) } """ assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == { - "filter": { - "id": 4, + "filter_0": { + "id": '4', "status": ["SUCCESS", "STARTED", "FAILED_TO_START"], "completed": True, "containerRuntimeType": 
["TEMPORAL_KUBERNETES", "AZURE_CONTAINER_APP_JOB"] }, - "sort": { + "sort_0": { "status": "ASC", "startedAt": "DESC", "statusLastUpdatedAt": "ASC", @@ -1488,18 +1331,18 @@ def test_get_ingestor_run_list_all_filters_no_verify_success(httpserver: HTTPSer get_ingestor_run_list_request_complete_handler) res = eas_client.get_ingestor_run_list( query_filter=IngestorRunsFilterInput( - id=4, + id='4', status=[IngestorRunState.SUCCESS, IngestorRunState.STARTED, IngestorRunState.FAILED_TO_START], completed=True, - container_runtime_type=[IngestorRuntimeKind.TEMPORAL_KUBERNETES, + containerRuntimeType=[IngestorRuntimeKind.TEMPORAL_KUBERNETES, IngestorRuntimeKind.AZURE_CONTAINER_APP_JOB] ), query_sort=IngestorRunsSortCriteriaInput( - status=Order.ASC, - started_at=Order.DESC, - status_last_updated_at=Order.ASC, - completed_at=Order.DESC, - container_runtime_type=Order.ASC + status=SortOrder.ASC, + startedAt=SortOrder.DESC, + statusLastUpdatedAt=SortOrder.ASC, + completedAt=SortOrder.DESC, + containerRuntimeType=SortOrder.ASC ) ) httpserver.check_assertions() @@ -1513,24 +1356,18 @@ def test_work_package_config_to_json_omits_server_defaulted_fields_if_unspecifie verify_certificate=False ) - wp_config = WorkPackageConfig( - name="wp", - syf_config=FeederConfigs([]), - intervention=InterventionConfig( - base_work_package_id="abc", - intervention_type=InterventionClass.COMMUNITY_BESS + wp_config = WorkPackageInput( + feederConfigs=FeederConfigsInput(configs=[]), + intervention=InterventionConfigInput( + baseWorkPackageId="abc", + interventionType=InterventionClass.COMMUNITY_BESS ) ) - json_config = eas_client.work_package_config_to_json(wp_config) + json_config = wp_config.model_dump_json(by_alias=True, exclude_defaults=True) - assert json_config["intervention"] == { + assert json.loads(json_config)['intervention'] == { "baseWorkPackageId": "abc", "interventionType": "COMMUNITY_BESS", - "candidateGeneration": None, - "allocationCriteria": None, - "specificAllocationInstance": 
None, - "phaseRebalanceProportions": None, - "dvms": None } def test_work_package_config_to_json_includes_server_defaulted_fields_if_specified(httpserver: HTTPServer): @@ -1540,19 +1377,18 @@ def test_work_package_config_to_json_includes_server_defaulted_fields_if_specifi verify_certificate=False ) - wp_config = WorkPackageConfig( - name="wp", - syf_config=FeederConfigs([]), - intervention=InterventionConfig( - base_work_package_id="abc", - year_range=YearRange(2020, 2025), - intervention_type=InterventionClass.COMMUNITY_BESS, - allocation_limit_per_year=5 + wp_config = WorkPackageInput( + feederConfigs=FeederConfigsInput(configs=[]), + intervention=InterventionConfigInput( + baseWorkPackageId="abc", + yearRange=YearRangeInput(minYear=2020, maxYear=2025), + interventionType=InterventionClass.COMMUNITY_BESS, + allocationLimitPerYear=5 ) ) - json_config = eas_client.work_package_config_to_json(wp_config) + json_config = wp_config.model_dump_json(by_alias=True) - assert json_config["intervention"] == { + assert json.loads(json_config)['intervention'] == { "baseWorkPackageId": "abc", "yearRange": { "maxYear": 2025, @@ -1574,45 +1410,52 @@ def test_work_package_config_to_json_for_tap_optimization(httpserver: HTTPServer verify_certificate=False ) - wp_config = WorkPackageConfig( - name="wp", - syf_config=FeederConfigs([]), - intervention=InterventionConfig( - base_work_package_id="abc", - year_range=YearRange(2020, 2025), - intervention_type=InterventionClass.DISTRIBUTION_TAP_OPTIMIZATION, - allocation_limit_per_year=5, - candidate_generation=CandidateGenerationConfig( + wp_config = WorkPackageInput( + feederConfigs=FeederConfigsInput(configs=[]), + intervention=InterventionConfigInput( + baseWorkPackageId="abc", + yearRange=YearRangeInput(minYear=2020, maxYear=2025), + interventionType=InterventionClass.DISTRIBUTION_TAP_OPTIMIZATION, + allocationLimitPerYear=5, + candidateGeneration=CandidateGenerationConfigInput( type=CandidateGenerationType.TAP_OPTIMIZATION, - 
average_voltage_spread_threshold=40, - voltage_under_limit_hours_threshold=1, - voltage_over_limit_hours_threshold=2, - tap_weighting_factor_lower_threshold=-0.3, - tap_weighting_factor_upper_threshold=0.4 + averageVoltageSpreadThreshold=40, + voltageUnderLimitHoursThreshold=1, + voltageOverLimitHoursThreshold=2, + tapWeightingFactorLowerThreshold=-0.3, + tapWeightingFactorUpperThreshold=0.4 ) ) ) - json_config = eas_client.work_package_config_to_json(wp_config) - - assert json_config["intervention"] == { - "baseWorkPackageId": "abc", - "yearRange": { - "maxYear": 2025, - "minYear": 2020 - }, - "interventionType": "DISTRIBUTION_TAP_OPTIMIZATION", - "candidateGeneration": { - "type": "TAP_OPTIMIZATION", - "interventionCriteriaName": None, - "averageVoltageSpreadThreshold": 40, - "voltageUnderLimitHoursThreshold": 1, - "voltageOverLimitHoursThreshold": 2, - "tapWeightingFactorLowerThreshold": -0.3, - "tapWeightingFactorUpperThreshold": 0.4, + json_config = wp_config.model_dump_json(by_alias=True) + + assert json.loads(json_config) == { + "executorConfig": None, + "feederConfigs": {"configs": []}, + "forecastConfig": None, + "generatorConfig": None, + "intervention": { + "baseWorkPackageId": "abc", + "yearRange": { + "maxYear": 2025, + "minYear": 2020 + }, + "interventionType": "DISTRIBUTION_TAP_OPTIMIZATION", + "candidateGeneration": { + "type": "TAP_OPTIMIZATION", + "interventionCriteriaName": None, + "averageVoltageSpreadThreshold": 40, + "voltageUnderLimitHoursThreshold": 1, + "voltageOverLimitHoursThreshold": 2, + "tapWeightingFactorLowerThreshold": -0.3, + "tapWeightingFactorUpperThreshold": 0.4, + }, + "allocationCriteria": None, + "specificAllocationInstance": None, + "phaseRebalanceProportions": None, + "dvms": None, + "allocationLimitPerYear": 5 }, - "allocationCriteria": None, - "specificAllocationInstance": None, - "phaseRebalanceProportions": None, - "dvms": None, - "allocationLimitPerYear": 5 + "qualityAssuranceProcessing": None, + 
"resultProcessorConfig": None, } diff --git a/test/test_feeder_load_analysis_input.py b/test/test_feeder_load_analysis_input.py index 402734f..e15cab7 100644 --- a/test/test_feeder_load_analysis_input.py +++ b/test/test_feeder_load_analysis_input.py @@ -3,25 +3,24 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. - -from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput -from zepben.eas.client.fla_forecast_config import FlaForecastConfig +from zepben.eas.lib.generated_graphql_client import FeederLoadAnalysisInput, FlaForecastConfigInput def test_feeder_load_analysis_constructor(): feeder_load_analysis_input = FeederLoadAnalysisInput( + aggregate_at_feeder_level=False, + end_date="2022-12-31", feeders=["feeder123"], substations=["sub1"], sub_geographical_regions=["sgr1"], geographical_regions=["gr1"], start_date="2022-04-01", - end_date="2022-12-31", fetch_lv_network=True, process_feeder_loads=True, process_coincident_loads=True, - aggregate_at_feeder_level=False, + produce_conductor_report=False, output="Test", - fla_forecast_config=FlaForecastConfig( + fla_forecast_config=FlaForecastConfigInput( scenario_id="1", year=2030, pv_upgrade_threshold=8000, From b0c941438bc7da80cba5bb8e16ccfa15f7bcde7e Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 02:01:22 +1100 Subject: [PATCH 02/32] license date, some tests, much tidy Signed-off-by: Max Chesterfield --- src/zepben/eas/__init__.py | 3 +- src/zepben/eas/client/__init__.py | 2 +- src/zepben/eas/client/eas_client.py | 2 +- src/zepben/eas/client/enums.py | 2 +- .../eas/client/patched_generated_client.py | 4 + test/test_eas_client.py | 567 +++++++----------- test/test_feeder_load_analysis_input.py | 2 +- test/test_patched_client.py | 30 + 8 files changed, 267 insertions(+), 345 deletions(-) create mode 100644 
test/test_patched_client.py diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index b4ff019..71a7abc 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this @@ -6,3 +6,4 @@ # from zepben.eas.client.eas_client import * +from zepben.eas.client.enums import * diff --git a/src/zepben/eas/client/__init__.py b/src/zepben/eas/client/__init__.py index 78960ad..189c70a 100644 --- a/src/zepben/eas/client/__init__.py +++ b/src/zepben/eas/client/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index efed17a..e31e3c6 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -1,4 +1,4 @@ -# Copyright 2025 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this diff --git a/src/zepben/eas/client/enums.py b/src/zepben/eas/client/enums.py index f42063c..0499f03 100644 --- a/src/zepben/eas/client/enums.py +++ b/src/zepben/eas/client/enums.py @@ -13,4 +13,4 @@ class OpenDssModelState(Enum): COULD_NOT_START = 'COULD_NOT_START' CREATION = 'CREATION' COMPLETED = 'COMPLETED' - FAILED = 'FAILED' \ No newline at end of file + FAILED = 'FAILED' diff --git a/src/zepben/eas/client/patched_generated_client.py b/src/zepben/eas/client/patched_generated_client.py index 728a16b..083afac 100644 --- a/src/zepben/eas/client/patched_generated_client.py +++ b/src/zepben/eas/client/patched_generated_client.py @@ -3,6 +3,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, cast import httpx @@ -13,6 +14,9 @@ class PatchedClient(Client): + """ + Used to override autogenerated client code, rather than having to maintain a diff to be applied after generating. + """ def get_data(self, response: httpx.Response) -> dict[str, Any]: if not response.is_success: diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 91b69e5..fd56cef 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -1,4 +1,4 @@ -# Copyright 2022 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this @@ -132,74 +132,61 @@ def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServ assert res == {"data": {"getWorkPackageCostEstimation": "123.45"}} -def test_get_work_package_cost_estimation_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_work_package_cost_estimation_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"getWorkPackageCostEstimation": "123.45"}}) - with pytest.raises(httpx.ConnectError): - eas_client.get_work_package_cost_estimation( - WorkPackageInput( - #"wp_name", - forecastConfig=ForecastConfigInput( - feeders=["feeder"], - years=[1], - scenarios=["scenario"], - timePeriod=TimePeriodInput( - startTime=datetime(2022, 1, 1, 10), - endTime=datetime(2022, 1, 2, 12), - overrides=None - ) + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"getWorkPackageCostEstimation": "123.45"}}) + with pytest.raises(httpx.ConnectError): + eas_client.get_work_package_cost_estimation( + WorkPackageInput( + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1, 10), + endTime=datetime(2022, 1, 2, 12), + overrides=None ) ) ) + ) -def test_get_work_package_cost_estimation_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_work_package_cost_estimation_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = 
_valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"getWorkPackageCostEstimation": "123.45"}}) - res = eas_client.get_work_package_cost_estimation( - WorkPackageInput( - feederConfigs=FeederConfigsInput( - configs=[ - FeederConfigInput( - feeder="feeder", - years=[1], - scenarios=["scenario"], - fixedTime=FixedTimeInput( - loadTime=datetime(2022, 1, 1), - overrides=[ - FixedTimeLoadOverrideInput( - loadId="meter", - genVarOverride=[1], - genWattsOverride=[2], - loadVarOverride=[3], - loadWattsOverride=[4] - ) - ] - ) + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"getWorkPackageCostEstimation": "123.45"}}) + res = eas_client.get_work_package_cost_estimation( + WorkPackageInput( + feederConfigs=FeederConfigsInput( + configs=[ + FeederConfigInput( + feeder="feeder", + years=[1], + scenarios=["scenario"], + fixedTime=FixedTimeInput( + loadTime=datetime(2022, 1, 1), + overrides=[ + FixedTimeLoadOverrideInput( + loadId="meter", + genVarOverride=[1], + genWattsOverride=[2], + loadVarOverride=[3], + loadWattsOverride=[4] + ) + ] ) - ] - ) + ) + ] ) ) + ) - httpserver.check_assertions() - assert res == {"data": {"getWorkPackageCostEstimation": "123.45"}} + httpserver.check_assertions() + assert res == {"data": {"getWorkPackageCostEstimation": "123.45"}} def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): @@ -228,46 +215,13 @@ def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPSer assert res == {"data": {"runWorkPackage": "workPackageId"}} -def test_run_hosting_capacity_work_package_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) - - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": 
{"runWorkPackage": "workPackageId"}}) - with pytest.raises(httpx.ConnectError): - eas_client.run_hosting_capacity_work_package( - WorkPackageInput( - forecastConfig=ForecastConfigInput( - feeders=["feeder"], - years=[1], - scenarios=["scenario"], - timePeriod=TimePeriodInput( - startTime=datetime(2022, 1, 1), - endTime=datetime(2022, 1, 2), - overrides=None - ) - ) - ), work_package_name="wp_name", - ) - - -def test_run_hosting_capacity_work_package_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_run_hosting_capacity_work_package_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"runWorkPackage": "workPackageId"}}) - res = eas_client.run_hosting_capacity_work_package( + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"runWorkPackage": "workPackageId"}}) + with pytest.raises(httpx.ConnectError): + eas_client.run_hosting_capacity_work_package( WorkPackageInput( forecastConfig=ForecastConfigInput( feeders=["feeder"], @@ -276,21 +230,42 @@ def test_run_hosting_capacity_work_package_valid_certificate_success(ca: trustme timePeriod=TimePeriodInput( startTime=datetime(2022, 1, 1), endTime=datetime(2022, 1, 2), - overrides=[ - TimePeriodLoadOverrideInput( - loadId="meter1", - loadWattsOverride=[1.0], - genWattsOverride=[2.0], - loadVarOverride=[3.0], - genVarOverride=[4.0] - ) - ] + overrides=None ) ) ), work_package_name="wp_name", ) - httpserver.check_assertions() - assert res == {"data": {"runWorkPackage": "workPackageId"}} + + +def test_run_hosting_capacity_work_package_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) + + 
httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"runWorkPackage": "workPackageId"}}) + res = eas_client.run_hosting_capacity_work_package( + WorkPackageInput( + forecastConfig=ForecastConfigInput( + feeders=["feeder"], + years=[1], + scenarios=["scenario"], + timePeriod=TimePeriodInput( + startTime=datetime(2022, 1, 1), + endTime=datetime(2022, 1, 2), + overrides=[ + TimePeriodLoadOverrideInput( + loadId="meter1", + loadWattsOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + genVarOverride=[4.0] + ) + ] + ) + ) + ), work_package_name="wp_name", + ) + httpserver.check_assertions() + assert res == {"data": {"runWorkPackage": "workPackageId"}} def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): @@ -308,35 +283,23 @@ def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTP assert res == {"data": {"cancelHostingCapacity": "workPackageId"}} -def test_cancel_hosting_capacity_work_package_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_cancel_hosting_capacity_work_package_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"cancelWorkPackage": "workPackageId"}}) - with pytest.raises(httpx.ConnectError): - eas_client.cancel_hosting_capacity_work_package("workPackageId") + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"cancelWorkPackage": "workPackageId"}}) + with pytest.raises(httpx.ConnectError): + eas_client.cancel_hosting_capacity_work_package("workPackageId") -def test_cancel_hosting_capacity_work_package_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with 
ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_cancel_hosting_capacity_work_package_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"cancelWorkPackage": "workPackageId"}}) - res = eas_client.cancel_hosting_capacity_work_package("workPackageId") - httpserver.check_assertions() - assert res == {"data": {"cancelWorkPackage": "workPackageId"}} + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"cancelWorkPackage": "workPackageId"}}) + res = eas_client.cancel_hosting_capacity_work_package("workPackageId") + httpserver.check_assertions() + assert res == {"data": {"cancelWorkPackage": "workPackageId"}} def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver: HTTPServer): @@ -354,35 +317,23 @@ def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver assert res == {"data": {"getWorkPackageProgress": {}}} -def test_get_hosting_capacity_work_package_progress_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_hosting_capacity_work_package_progress_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"getWorkPackageProgress": {}}}) - with pytest.raises(httpx.ConnectError): - eas_client.get_hosting_capacity_work_packages_progress() + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"getWorkPackageProgress": {}}}) + with pytest.raises(httpx.ConnectError): + 
eas_client.get_hosting_capacity_work_packages_progress() -def test_get_hosting_capacity_work_package_progress_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_hosting_capacity_work_package_progress_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json( - {"data": {"getWorkPackageProgress": {}}}) - res = eas_client.get_hosting_capacity_work_packages_progress() - httpserver.check_assertions() - assert res == {"data": {"getWorkPackageProgress": {}}} + httpserver.expect_oneshot_request("/api/graphql").respond_with_json( + {"data": {"getWorkPackageProgress": {}}}) + res = eas_client.get_hosting_capacity_work_packages_progress() + httpserver.check_assertions() + assert res == {"data": {"getWorkPackageProgress": {}}} def test_upload_study_no_verify_success(httpserver: HTTPServer): @@ -406,33 +357,21 @@ def test_upload_study_no_verify_success(httpserver: HTTPServer): assert res == {"result": "success"} -def test_upload_study_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_upload_study_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(httpx.ConnectError): - eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) + 
httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + with pytest.raises(httpx.ConnectError): + eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) -def test_upload_study_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_upload_study_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - res = eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) - httpserver.check_assertions() - assert res == {"result": "success"} + httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + res = eas_client.upload_study(StudyInput(name="Test study", description="description", tags=["tag"], results=[StudyResultInput(name="Huge success", sections=[])], styles=[])) + httpserver.check_assertions() + assert res == {"result": "success"} def hosting_capacity_run_calibration_request_handler(request): @@ -461,34 +400,22 @@ def test_run_hosting_capacity_calibration_no_verify_success(httpserver: HTTPServ assert res == {"result": "success"} -def test_run_hosting_capacity_calibration_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_run_hosting_capacity_calibration_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = 
_invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(httpx.ConnectError): - eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(2025, month=7, day=12)) + httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + with pytest.raises(httpx.ConnectError): + eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(2025, month=7, day=12)) -def test_run_hosting_capacity_calibration_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_run_hosting_capacity_calibration_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( - hosting_capacity_run_calibration_request_handler) - res = eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(2025, month=7, day=12)) - httpserver.check_assertions() - assert res == {"result": "success"} + httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( + hosting_capacity_run_calibration_request_handler) + res = eas_client.run_hosting_capacity_calibration("TEST CALIBRATION", datetime(2025, month=7, day=12)) + httpserver.check_assertions() + assert res == {"result": "success"} def get_hosting_capacity_run_calibration_request_handler(request): @@ -515,34 +442,22 @@ def test_get_hosting_capacity_calibration_run_no_verify_success(httpserver: HTTP assert res == {"result": "success"} -def test_get_hosting_capacity_calibration_run_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - 
ca_filename=ca_filename - ) +def test_get_hosting_capacity_calibration_run_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(httpx.ConnectError): - eas_client.get_hosting_capacity_calibration_run("calibration-id") + httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + with pytest.raises(httpx.ConnectError): + eas_client.get_hosting_capacity_calibration_run("calibration-id") -def test_get_hosting_capacity_calibration_run_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_hosting_capacity_calibration_run_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( - get_hosting_capacity_run_calibration_request_handler) - res = eas_client.get_hosting_capacity_calibration_run("calibration-id") - httpserver.check_assertions() - assert res == {"result": "success"} + httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( + get_hosting_capacity_run_calibration_request_handler) + res = eas_client.get_hosting_capacity_calibration_run("calibration-id") + httpserver.check_assertions() + assert res == {"result": "success"} def hosting_capacity_run_calibration_with_calibration_time_request_handler(request): @@ -585,7 +500,8 @@ def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_succes def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_no_generator_config( - httpserver: HTTPServer): + httpserver: HTTPServer +): eas_client = EasClient( LOCALHOST, httpserver.port, @@ -626,7 +542,8 @@ def 
hosting_capacity_run_calibration_with_generator_config_request_handler(reque def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_partial_generator_config( - httpserver: HTTPServer): + httpserver: HTTPServer +): eas_client = EasClient( LOCALHOST, httpserver.port, @@ -667,7 +584,8 @@ def hosting_capacity_run_calibration_with_partial_model_config_request_handler(r def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_partial_model_config( - httpserver: HTTPServer): + httpserver: HTTPServer +): eas_client = EasClient( LOCALHOST, httpserver.port, @@ -1044,43 +962,32 @@ def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): assert res == {"result": "success"} -def test_run_opendss_export_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_run_opendss_export_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(httpx.ConnectError): - eas_client.run_opendss_export(OPENDSS_CONFIG) - - -def test_run_opendss_export_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) - dss_conf = OPENDSS_CONFIG.model_copy() - dss_conf.generation_spec.modules_configuration.common.fixed_time = FixedTimeInput(load_time=datetime(2022, 4, 1), - overrides=[ - FixedTimeLoadOverrideInput( - loadId="meter1", - genVarOverride=[1.0], - genWattsOverride=[2.0], - loadVarOverride=[3.0], - loadWattsOverride=[4.0] - ) - ]) - 
httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(run_opendss_export_request_handler) - res = eas_client.run_opendss_export(dss_conf) - httpserver.check_assertions() - assert res == {"result": "success"} + httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + with pytest.raises(httpx.ConnectError): + eas_client.run_opendss_export(OPENDSS_CONFIG) + + +def test_run_opendss_export_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) + + dss_conf = OPENDSS_CONFIG.model_copy() + dss_conf.generation_spec.modules_configuration.common.fixed_time = FixedTimeInput(load_time=datetime(2022, 4, 1), + overrides=[ + FixedTimeLoadOverrideInput( + loadId="meter1", + genVarOverride=[1.0], + genWattsOverride=[2.0], + loadVarOverride=[3.0], + loadWattsOverride=[4.0] + ) + ]) + httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(run_opendss_export_request_handler) + res = eas_client.run_opendss_export(dss_conf) + httpserver.check_assertions() + assert res == {"result": "success"} get_paged_opendss_models_query = """ @@ -1127,18 +1034,12 @@ def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): assert res == {"result": "success"} -def test_get_paged_opendss_models_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_paged_opendss_models_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) - with pytest.raises(httpx.ConnectError): - eas_client.get_paged_opendss_models() + httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) + with 
pytest.raises(httpx.ConnectError): + eas_client.get_paged_opendss_models() def get_paged_opendss_models_no_param_request_handler(request): @@ -1151,20 +1052,14 @@ def get_paged_opendss_models_no_param_request_handler(request): return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") -def test_get_paged_opendss_models_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_paged_opendss_models_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( - get_paged_opendss_models_no_param_request_handler) - res = eas_client.get_paged_opendss_models() - httpserver.check_assertions() - assert res == {"result": "success"} + httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( + get_paged_opendss_models_no_param_request_handler) + res = eas_client.get_paged_opendss_models() + httpserver.check_assertions() + assert res == {"result": "success"} def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer): @@ -1183,39 +1078,27 @@ def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer assert res == "https://example.com/download/1" -def test_get_opendss_model_download_url_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): - with trustme.Blob(b"invalid ca").tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_opendss_model_download_url_invalid_certificate_failure(httpserver: HTTPServer): + eas_client = _invalid_ca(httpserver.port) - httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( - 
status=HTTPStatus.FOUND, - headers={"Location": "https://example.com/download/1"} - )) - with pytest.raises(httpx.ConnectError): - eas_client.get_opendss_model_download_url(1) + httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( + status=HTTPStatus.FOUND, + headers={"Location": "https://example.com/download/1"} + )) + with pytest.raises(httpx.ConnectError): + eas_client.get_opendss_model_download_url(1) -def test_get_opendss_model_download_url_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): - with ca.cert_pem.tempfile() as ca_filename: - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, - ca_filename=ca_filename - ) +def test_get_opendss_model_download_url_valid_certificate_success(httpserver: HTTPServer, ca: trustme.CA): + eas_client = _valid_ca(httpserver.port, ca) - httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( - status=HTTPStatus.FOUND, - headers={"Location": "https://example.com/download/1"} - )) - res = eas_client.get_opendss_model_download_url(1) - httpserver.check_assertions() - assert res == "https://example.com/download/1" + httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( + status=HTTPStatus.FOUND, + headers={"Location": "https://example.com/download/1"} + )) + res = eas_client.get_opendss_model_download_url(1) + httpserver.check_assertions() + assert res == "https://example.com/download/1" def run_ingestor_request_handler(request): @@ -1349,12 +1232,7 @@ def test_get_ingestor_run_list_all_filters_no_verify_success(httpserver: HTTPSer assert res == {"result": "success"} -def test_work_package_config_to_json_omits_server_defaulted_fields_if_unspecified(httpserver: HTTPServer): - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False - ) +def 
test_work_package_config_to_json_omits_server_defaulted_fields_if_unspecified(): wp_config = WorkPackageInput( feederConfigs=FeederConfigsInput(configs=[]), @@ -1370,12 +1248,7 @@ def test_work_package_config_to_json_omits_server_defaulted_fields_if_unspecifie "interventionType": "COMMUNITY_BESS", } -def test_work_package_config_to_json_includes_server_defaulted_fields_if_specified(httpserver: HTTPServer): - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False - ) +def test_work_package_config_to_json_includes_server_defaulted_fields_if_specified(): wp_config = WorkPackageInput( feederConfigs=FeederConfigsInput(configs=[]), @@ -1403,13 +1276,7 @@ def test_work_package_config_to_json_includes_server_defaulted_fields_if_specifi "allocationLimitPerYear": 5 } -def test_work_package_config_to_json_for_tap_optimization(httpserver: HTTPServer): - eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False - ) - +def test_work_package_config_to_json_for_tap_optimization(): wp_config = WorkPackageInput( feederConfigs=FeederConfigsInput(configs=[]), intervention=InterventionConfigInput( @@ -1459,3 +1326,23 @@ def test_work_package_config_to_json_for_tap_optimization(httpserver: HTTPServer "qualityAssuranceProcessing": None, "resultProcessorConfig": None, } + + +def _invalid_ca(port): + with trustme.Blob(b"invalid ca").tempfile() as ca_filename: + return EasClient( + LOCALHOST, + port, + verify_certificate=True, + ca_filename=ca_filename + ) + + +def _valid_ca(port, ca: trustme.CA): + with ca.cert_pem.tempfile() as ca_filename: + return EasClient( + LOCALHOST, + port, + verify_certificate=True, + ca_filename=ca_filename + ) diff --git a/test/test_feeder_load_analysis_input.py b/test/test_feeder_load_analysis_input.py index e15cab7..dfc4b81 100644 --- a/test/test_feeder_load_analysis_input.py +++ b/test/test_feeder_load_analysis_input.py @@ -1,4 +1,4 @@ -# Copyright 2025 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend 
Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this diff --git a/test/test_patched_client.py b/test/test_patched_client.py new file mode 100644 index 0000000..c6c4814 --- /dev/null +++ b/test/test_patched_client.py @@ -0,0 +1,30 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +from zepben.eas import EasClient +from zepben.eas.client.patched_generated_client import PatchedClient + + +class MockResponse: + def json(self): + return dict(json="probably") + + def get_data(self, key: str): + return dict(data="probably_also") + + def is_success(self): + return True + + +def test_patched_client_used_in_eas_client(): + + client = EasClient(host="test_host", port=9876) + assert isinstance(client._gql_client, PatchedClient) + + +def test_patched_client_overrides_get_data_to_return_the_whole_json_response(): + + client = EasClient(host="test_host", port=9876) + assert client._gql_client.get_data(MockResponse()) == {'json': 'probably'} From e8bc2ea374d713c904eaa2e68f82ca6e99e50172 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 02:04:39 +1100 Subject: [PATCH 03/32] eas_client docstrings not updated Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 5 +++-- src/zepben/eas/client/patched_generated_client.py | 2 ++ test/test_patched_client.py | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index e31e3c6..60579ec 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -3,6 +3,9 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. + +__all__ = ["EasClient"] + import ssl from asyncio import get_event_loop from datetime import datetime @@ -12,8 +15,6 @@ from aiohttp import ClientSession -__all__ = ["EasClient"] - from zepben.eas.client.patched_generated_client import PatchedClient as Client from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ diff --git a/src/zepben/eas/client/patched_generated_client.py b/src/zepben/eas/client/patched_generated_client.py index 083afac..bb11e0d 100644 --- a/src/zepben/eas/client/patched_generated_client.py +++ b/src/zepben/eas/client/patched_generated_client.py @@ -4,6 +4,8 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. +__all__ = ['PatchedClient'] + from typing import Any, cast import httpx diff --git a/test/test_patched_client.py b/test/test_patched_client.py index c6c4814..b10f466 100644 --- a/test/test_patched_client.py +++ b/test/test_patched_client.py @@ -3,6 +3,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ from zepben.eas import EasClient from zepben.eas.client.patched_generated_client import PatchedClient From 2a8cb717709f84d16c2a58380973e7a6dcc374ac Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 18:48:36 +1100 Subject: [PATCH 04/32] I guess returning fields would be ideal Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 70 +++++++++++++++++++ test/test_eas_client.py | 45 +++++++----- ..._input.py => test_feeder_load_analysis.py} | 0 3 files changed, 96 insertions(+), 19 deletions(-) rename test/{test_feeder_load_analysis_input.py => test_feeder_load_analysis.py} (100%) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 60579ec..755cba1 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -20,6 +20,9 @@ from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, \ HcModelConfigInput, OpenDssModelInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput +from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisSpecFields, \ + FeederLoadAnalysisReportFields, IngestionRunFields, HcCalibrationFields, GqlTxTapRecordFields, \ + OpenDssModelPageFields, OpenDssModelFields from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation from zepben.eas.lib.generated_graphql_client.custom_queries import Query @@ -170,6 +173,28 @@ def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool """ return get_event_loop().run_until_complete( self._gql_client.query( + FeederLoadAnalysisReportFields.id, + FeederLoadAnalysisReportFields.name, + FeederLoadAnalysisReportFields.created_at, + FeederLoadAnalysisReportFields.created_by, + FeederLoadAnalysisReportFields.completed_at, + FeederLoadAnalysisReportFields.state, + 
FeederLoadAnalysisReportFields.errors, + FeederLoadAnalysisReportFields.generation_spec().fields( + FeederLoadAnalysisSpecFields.feeders, + FeederLoadAnalysisSpecFields.substations, + FeederLoadAnalysisSpecFields.sub_geographical_regions, + FeederLoadAnalysisSpecFields.geographical_regions, + FeederLoadAnalysisSpecFields.start_date, + FeederLoadAnalysisSpecFields.end_date, + FeederLoadAnalysisSpecFields.fetch_lv_network, + FeederLoadAnalysisSpecFields.process_feeder_loads, + FeederLoadAnalysisSpecFields.process_coincident_loads, + FeederLoadAnalysisSpecFields.produce_basic_report, + FeederLoadAnalysisSpecFields.produce_conductor_report, + FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, + FeederLoadAnalysisSpecFields.output + ), Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec), operation_name="getFeederLoadAnalysisReportStatus", ) @@ -208,6 +233,14 @@ def get_ingestor_run(self, ingestor_run_id: int): """ return get_event_loop().run_until_complete( self._gql_client.query( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, Query.get_ingestor_run(ingestor_run_id), operation_name="getIngestorRun", ) @@ -227,6 +260,14 @@ def get_ingestor_run_list( """ return get_event_loop().run_until_complete( self._gql_client.query( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, Query.list_ingestor_runs(filter_=query_filter, sort=query_sort), operation_name="listIngestorRuns", ) @@ -280,6 +321,16 @@ def get_hosting_capacity_calibration_run(self, id: str): """ return get_event_loop().run_until_complete( 
self._gql_client.query( + HcCalibrationFields.id, + HcCalibrationFields.name, + HcCalibrationFields.workflow_id, + HcCalibrationFields.run_id, + HcCalibrationFields.calibration_time_local, + HcCalibrationFields.start_at, + HcCalibrationFields.completed_at, + HcCalibrationFields.status, + HcCalibrationFields.feeders, + HcCalibrationFields.calibration_work_package_config, Query.get_calibration_run(id), operation_name="getCalibrationRun", ) @@ -312,6 +363,13 @@ def get_transformer_tap_settings( """ return get_event_loop().run_until_complete( self._gql_client.query( + GqlTxTapRecordFields.id, + GqlTxTapRecordFields.high_step, + GqlTxTapRecordFields.low_step, + GqlTxTapRecordFields.nominal_tap_num, + GqlTxTapRecordFields.tap_position, + GqlTxTapRecordFields.control_enabled, + GqlTxTapRecordFields.step_voltage_increment, Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, @@ -350,6 +408,18 @@ def get_paged_opendss_models( """ return get_event_loop().run_until_complete( self._gql_client.query( + OpenDssModelPageFields.total_count, + OpenDssModelPageFields.offset, + OpenDssModelPageFields.models().fields( + OpenDssModelFields.id, + OpenDssModelFields.name, + OpenDssModelFields.created_at, + OpenDssModelFields.state, + OpenDssModelFields.download_url, + OpenDssModelFields.is_public, + OpenDssModelFields.errors, + OpenDssModelFields.generation_spec + ), Query.paged_open_dss_models(limit=limit, offset=offset, filter_=query_filter, sort=query_sort), operation_name="pagedOpenDssModels", ) diff --git a/test/test_eas_client.py b/test/test_eas_client.py index fd56cef..931b12d 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -422,8 +422,10 @@ def get_hosting_capacity_run_calibration_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "query getCalibrationRun($id_0: ID!) 
{ getCalibrationRun(id: $id_0) }" - assert actual_body['variables'] == {"id_0": "calibration-id"} + assert query == ("query getCalibrationRun($id_10: ID!) { id name workflowId runId " + "calibrationTimeLocal startAt completedAt status feeders " + "calibrationWorkPackageConfig getCalibrationRun(id: $id_10) }") + assert actual_body['variables'] == {"id_10": "calibration-id"} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -991,8 +993,11 @@ def test_run_opendss_export_valid_certificate_success(httpserver: HTTPServer, ca get_paged_opendss_models_query = """ - query pagedOpenDssModels($limit_0: Int, $offset_0: Long, $filter_0: GetOpenDssModelsFilterInput, $sort_0: GetOpenDssModelsSortCriteriaInput) { - pagedOpenDssModels( limit: $limit_0 offset: $offset_0 filter: $filter_0 sort: $sort_0 ) + query pagedOpenDssModels($limit_3: Int, $offset_3: Long, $filter_3: + GetOpenDssModelsFilterInput, $sort_3: GetOpenDssModelsSortCriteriaInput) { + totalCount offset models { id name createdAt state downloadUrl isPublic + errors generationSpec } pagedOpenDssModels( limit: $limit_3 offset: + $offset_3 filter: $filter_3 sort: $sort_3 ) } """ @@ -1003,14 +1008,14 @@ def get_paged_opendss_models_request_handler(request): assert query == " ".join(line.strip() for line in get_paged_opendss_models_query.strip().splitlines()) assert actual_body['variables'] == { - "limit_0": 5, - "offset_0": 0, - "filter_0": { + "limit_3": 5, + "offset_3": 0, + "filter_3": { "name": "TEST OPENDSS MODEL 1", "isPublic": True, "state": ["COMPLETED"], }, - "sort_0": { + "sort_3": { "state": "ASC", } } @@ -1046,7 +1051,8 @@ def get_paged_opendss_models_no_param_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == 'query pagedOpenDssModels { pagedOpenDssModels }' + assert query == ('query pagedOpenDssModels { totalCount offset models { id name createdAt ' + 'state downloadUrl 
isPublic errors generationSpec } pagedOpenDssModels }') assert actual_body['variables'] == {} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1131,8 +1137,9 @@ def get_ingestor_run_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "query getIngestorRun($id_0: Int!) { getIngestorRun(id: $id_0) }" - assert actual_body['variables'] == {"id_0": 1} + assert query == ("query getIngestorRun($id_8: Int!) { id containerRuntimeType payload token " + "status startedAt statusLastUpdatedAt completedAt getIngestorRun(id: $id_8) }") + assert actual_body['variables'] == {"id_8": 1} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1154,7 +1161,8 @@ def get_ingestor_run_list_request_empty_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = """query listIngestorRuns { listIngestorRuns }""" + get_ingestor_run_list_query = ("query listIngestorRuns { id containerRuntimeType payload token status " + "startedAt statusLastUpdatedAt completedAt listIngestorRuns }") assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == {} @@ -1178,20 +1186,19 @@ def get_ingestor_run_list_request_complete_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = """ - query listIngestorRuns($filter_0: IngestorRunsFilterInput, $sort_0: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter_0, sort: $sort_0) - } - """ + get_ingestor_run_list_query = ("query listIngestorRuns($filter_8: IngestorRunsFilterInput, $sort_8: " + "IngestorRunsSortCriteriaInput) { id containerRuntimeType payload token " + "status startedAt statusLastUpdatedAt completedAt 
listIngestorRuns(filter: " + "$filter_8, sort: $sort_8) }") assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == { - "filter_0": { + "filter_8": { "id": '4', "status": ["SUCCESS", "STARTED", "FAILED_TO_START"], "completed": True, "containerRuntimeType": ["TEMPORAL_KUBERNETES", "AZURE_CONTAINER_APP_JOB"] }, - "sort_0": { + "sort_8": { "status": "ASC", "startedAt": "DESC", "statusLastUpdatedAt": "ASC", diff --git a/test/test_feeder_load_analysis_input.py b/test/test_feeder_load_analysis.py similarity index 100% rename from test/test_feeder_load_analysis_input.py rename to test/test_feeder_load_analysis.py From 99b79b0872980bd301d7ec83eeb9b34a7df6e5f4 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 19:05:36 +1100 Subject: [PATCH 05/32] do you think we should put the fields in the right spot? Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 151 +++++++++++++++------------- test/test_eas_client.py | 56 +++++------ test/test_integration_testing.py | 5 + 3 files changed, 113 insertions(+), 99 deletions(-) create mode 100644 test/test_integration_testing.py diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 755cba1..4039d5e 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -173,29 +173,30 @@ def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool """ return get_event_loop().run_until_complete( self._gql_client.query( - FeederLoadAnalysisReportFields.id, - FeederLoadAnalysisReportFields.name, - FeederLoadAnalysisReportFields.created_at, - FeederLoadAnalysisReportFields.created_by, - FeederLoadAnalysisReportFields.completed_at, - FeederLoadAnalysisReportFields.state, - FeederLoadAnalysisReportFields.errors, - FeederLoadAnalysisReportFields.generation_spec().fields( - FeederLoadAnalysisSpecFields.feeders, - 
FeederLoadAnalysisSpecFields.substations, - FeederLoadAnalysisSpecFields.sub_geographical_regions, - FeederLoadAnalysisSpecFields.geographical_regions, - FeederLoadAnalysisSpecFields.start_date, - FeederLoadAnalysisSpecFields.end_date, - FeederLoadAnalysisSpecFields.fetch_lv_network, - FeederLoadAnalysisSpecFields.process_feeder_loads, - FeederLoadAnalysisSpecFields.process_coincident_loads, - FeederLoadAnalysisSpecFields.produce_basic_report, - FeederLoadAnalysisSpecFields.produce_conductor_report, - FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, - FeederLoadAnalysisSpecFields.output + Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( + FeederLoadAnalysisReportFields.id, + FeederLoadAnalysisReportFields.name, + FeederLoadAnalysisReportFields.created_at, + FeederLoadAnalysisReportFields.created_by, + FeederLoadAnalysisReportFields.completed_at, + FeederLoadAnalysisReportFields.state, + FeederLoadAnalysisReportFields.errors, + FeederLoadAnalysisReportFields.generation_spec().fields( + FeederLoadAnalysisSpecFields.feeders, + FeederLoadAnalysisSpecFields.substations, + FeederLoadAnalysisSpecFields.sub_geographical_regions, + FeederLoadAnalysisSpecFields.geographical_regions, + FeederLoadAnalysisSpecFields.start_date, + FeederLoadAnalysisSpecFields.end_date, + FeederLoadAnalysisSpecFields.fetch_lv_network, + FeederLoadAnalysisSpecFields.process_feeder_loads, + FeederLoadAnalysisSpecFields.process_coincident_loads, + FeederLoadAnalysisSpecFields.produce_basic_report, + FeederLoadAnalysisSpecFields.produce_conductor_report, + FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, + FeederLoadAnalysisSpecFields.output + ), ), - Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec), operation_name="getFeederLoadAnalysisReportStatus", ) ) @@ -233,15 +234,16 @@ def get_ingestor_run(self, ingestor_run_id: int): """ return get_event_loop().run_until_complete( self._gql_client.query( - IngestionRunFields.id, - 
IngestionRunFields.container_runtime_type, - IngestionRunFields.payload, - IngestionRunFields.token, - IngestionRunFields.status, - IngestionRunFields.started_at, - IngestionRunFields.status_last_updated_at, - IngestionRunFields.completed_at, - Query.get_ingestor_run(ingestor_run_id), + Query.get_ingestor_run(ingestor_run_id).fields( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, + ), operation_name="getIngestorRun", ) ) @@ -260,15 +262,16 @@ def get_ingestor_run_list( """ return get_event_loop().run_until_complete( self._gql_client.query( - IngestionRunFields.id, - IngestionRunFields.container_runtime_type, - IngestionRunFields.payload, - IngestionRunFields.token, - IngestionRunFields.status, - IngestionRunFields.started_at, - IngestionRunFields.status_last_updated_at, - IngestionRunFields.completed_at, - Query.list_ingestor_runs(filter_=query_filter, sort=query_sort), + Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, + ), operation_name="listIngestorRuns", ) ) @@ -321,17 +324,18 @@ def get_hosting_capacity_calibration_run(self, id: str): """ return get_event_loop().run_until_complete( self._gql_client.query( - HcCalibrationFields.id, - HcCalibrationFields.name, - HcCalibrationFields.workflow_id, - HcCalibrationFields.run_id, - HcCalibrationFields.calibration_time_local, - HcCalibrationFields.start_at, - HcCalibrationFields.completed_at, - HcCalibrationFields.status, - HcCalibrationFields.feeders, - HcCalibrationFields.calibration_work_package_config, - 
Query.get_calibration_run(id), + Query.get_calibration_run(id).fields( + HcCalibrationFields.id, + HcCalibrationFields.name, + HcCalibrationFields.workflow_id, + HcCalibrationFields.run_id, + HcCalibrationFields.calibration_time_local, + HcCalibrationFields.start_at, + HcCalibrationFields.completed_at, + HcCalibrationFields.status, + HcCalibrationFields.feeders, + HcCalibrationFields.calibration_work_package_config, + ), operation_name="getCalibrationRun", ) ) @@ -363,17 +367,18 @@ def get_transformer_tap_settings( """ return get_event_loop().run_until_complete( self._gql_client.query( - GqlTxTapRecordFields.id, - GqlTxTapRecordFields.high_step, - GqlTxTapRecordFields.low_step, - GqlTxTapRecordFields.nominal_tap_num, - GqlTxTapRecordFields.tap_position, - GqlTxTapRecordFields.control_enabled, - GqlTxTapRecordFields.step_voltage_increment, Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, transformer_mrid=transformer_mrid + ).fields( + GqlTxTapRecordFields.id, + GqlTxTapRecordFields.high_step, + GqlTxTapRecordFields.low_step, + GqlTxTapRecordFields.nominal_tap_num, + GqlTxTapRecordFields.tap_position, + GqlTxTapRecordFields.control_enabled, + GqlTxTapRecordFields.step_voltage_increment, ), operation_name="getTransformerTapSettings", ) @@ -408,19 +413,25 @@ def get_paged_opendss_models( """ return get_event_loop().run_until_complete( self._gql_client.query( - OpenDssModelPageFields.total_count, - OpenDssModelPageFields.offset, - OpenDssModelPageFields.models().fields( - OpenDssModelFields.id, - OpenDssModelFields.name, - OpenDssModelFields.created_at, - OpenDssModelFields.state, - OpenDssModelFields.download_url, - OpenDssModelFields.is_public, - OpenDssModelFields.errors, - OpenDssModelFields.generation_spec + Query.paged_open_dss_models( + limit=limit, + offset=offset, + filter_=query_filter, + sort=query_sort + ).fields( + OpenDssModelPageFields.total_count, + OpenDssModelPageFields.offset, + 
OpenDssModelPageFields.models().fields( + OpenDssModelFields.id, + OpenDssModelFields.name, + OpenDssModelFields.created_at, + OpenDssModelFields.state, + OpenDssModelFields.download_url, + OpenDssModelFields.is_public, + OpenDssModelFields.errors, + OpenDssModelFields.generation_spec + ), ), - Query.paged_open_dss_models(limit=limit, offset=offset, filter_=query_filter, sort=query_sort), operation_name="pagedOpenDssModels", ) ) diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 931b12d..5d82a4c 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -422,10 +422,10 @@ def get_hosting_capacity_run_calibration_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == ("query getCalibrationRun($id_10: ID!) { id name workflowId runId " - "calibrationTimeLocal startAt completedAt status feeders " - "calibrationWorkPackageConfig getCalibrationRun(id: $id_10) }") - assert actual_body['variables'] == {"id_10": "calibration-id"} + assert query == ("query getCalibrationRun($id_0: ID!) 
{ getCalibrationRun(id: $id_0) { id name " + "workflowId runId calibrationTimeLocal startAt completedAt status feeders " + "calibrationWorkPackageConfig } }") + assert actual_body['variables'] == {"id_0": "calibration-id"} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -992,14 +992,11 @@ def test_run_opendss_export_valid_certificate_success(httpserver: HTTPServer, ca assert res == {"result": "success"} -get_paged_opendss_models_query = """ - query pagedOpenDssModels($limit_3: Int, $offset_3: Long, $filter_3: - GetOpenDssModelsFilterInput, $sort_3: GetOpenDssModelsSortCriteriaInput) { - totalCount offset models { id name createdAt state downloadUrl isPublic - errors generationSpec } pagedOpenDssModels( limit: $limit_3 offset: - $offset_3 filter: $filter_3 sort: $sort_3 ) - } - """ +get_paged_opendss_models_query = ("query pagedOpenDssModels($limit_0: Int, $offset_0: Long, $filter_0: " + "GetOpenDssModelsFilterInput, $sort_0: GetOpenDssModelsSortCriteriaInput) { " + "pagedOpenDssModels( limit: $limit_0 offset: $offset_0 filter: $filter_0 " + "sort: $sort_0 ) { totalCount offset models { id name createdAt state " + "downloadUrl isPublic errors generationSpec } } }") def get_paged_opendss_models_request_handler(request): @@ -1008,14 +1005,14 @@ def get_paged_opendss_models_request_handler(request): assert query == " ".join(line.strip() for line in get_paged_opendss_models_query.strip().splitlines()) assert actual_body['variables'] == { - "limit_3": 5, - "offset_3": 0, - "filter_3": { + "limit_0": 5, + "offset_0": 0, + "filter_0": { "name": "TEST OPENDSS MODEL 1", "isPublic": True, "state": ["COMPLETED"], }, - "sort_3": { + "sort_0": { "state": "ASC", } } @@ -1051,8 +1048,8 @@ def get_paged_opendss_models_no_param_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == ('query pagedOpenDssModels { totalCount offset models { id name 
createdAt ' - 'state downloadUrl isPublic errors generationSpec } pagedOpenDssModels }') + assert query == ('query pagedOpenDssModels { pagedOpenDssModels { totalCount offset models { id name createdAt ' + 'state downloadUrl isPublic errors generationSpec } } }') assert actual_body['variables'] == {} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1137,9 +1134,10 @@ def get_ingestor_run_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == ("query getIngestorRun($id_8: Int!) { id containerRuntimeType payload token " - "status startedAt statusLastUpdatedAt completedAt getIngestorRun(id: $id_8) }") - assert actual_body['variables'] == {"id_8": 1} + assert query == ("query getIngestorRun($id_0: Int!) { getIngestorRun(id: $id_0) { id " + "containerRuntimeType payload token status startedAt statusLastUpdatedAt " + "completedAt } }") + assert actual_body['variables'] == {"id_0": 1} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1161,8 +1159,8 @@ def get_ingestor_run_list_request_empty_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = ("query listIngestorRuns { id containerRuntimeType payload token status " - "startedAt statusLastUpdatedAt completedAt listIngestorRuns }") + get_ingestor_run_list_query = ("query listIngestorRuns { listIngestorRuns { id containerRuntimeType payload " + "token status startedAt statusLastUpdatedAt completedAt } }") assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == {} @@ -1186,19 +1184,19 @@ def get_ingestor_run_list_request_complete_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = 
("query listIngestorRuns($filter_8: IngestorRunsFilterInput, $sort_8: " - "IngestorRunsSortCriteriaInput) { id containerRuntimeType payload token " - "status startedAt statusLastUpdatedAt completedAt listIngestorRuns(filter: " - "$filter_8, sort: $sort_8) }") + get_ingestor_run_list_query = ("query listIngestorRuns($filter_0: IngestorRunsFilterInput, $sort_0: " + "IngestorRunsSortCriteriaInput) { listIngestorRuns(filter: $filter_0, sort: " + "$sort_0) { id containerRuntimeType payload token status startedAt " + "statusLastUpdatedAt completedAt } }") assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == { - "filter_8": { + "filter_0": { "id": '4', "status": ["SUCCESS", "STARTED", "FAILED_TO_START"], "completed": True, "containerRuntimeType": ["TEMPORAL_KUBERNETES", "AZURE_CONTAINER_APP_JOB"] }, - "sort_8": { + "sort_0": { "status": "ASC", "startedAt": "DESC", "statusLastUpdatedAt": "ASC", diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py new file mode 100644 index 0000000..4a7146f --- /dev/null +++ b/test/test_integration_testing.py @@ -0,0 +1,5 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
From b22ad10347e5f390d4df4ed073f2c707d7163bed Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 20:01:37 +1100 Subject: [PATCH 06/32] add some decorators, and integration tests Signed-off-by: Max Chesterfield --- src/zepben/eas/client/decorators.py | 31 +++ src/zepben/eas/client/eas_client.py | 387 +++++++++++++--------------- src/zepben/eas/client/enums.py | 4 + test/test_integration_testing.py | 25 ++ 4 files changed, 246 insertions(+), 201 deletions(-) create mode 100644 src/zepben/eas/client/decorators.py diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py new file mode 100644 index 0000000..318690c --- /dev/null +++ b/src/zepben/eas/client/decorators.py @@ -0,0 +1,31 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +__all__ = ['catch_warnings', 'async_func'] + +import functools +import warnings +from asyncio import get_event_loop +from typing import Callable + + +def catch_warnings(func: Callable) -> Callable: + """ + Wrap a function in `warnings.catch_warnings() + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + with warnings.catch_warnings(): + return func(*args, **kwargs) + return wrapper + + +def async_func(func: Callable) -> Callable: + def wrapper(self, *args, **kwargs): + if self._asynchronous: + return func(self, *args, **kwargs) + return get_event_loop().run_until_complete(func(self, *args, **kwargs)) + return wrapper diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 4039d5e..c27988f 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -12,9 +12,8 @@ from http import HTTPStatus import httpx -from aiohttp import ClientSession - +from zepben.eas.client.decorators import async_func, catch_warnings from 
zepben.eas.client.patched_generated_client import PatchedClient as Client from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ @@ -40,8 +39,7 @@ def __init__( access_token: str | None = None, verify_certificate: bool = True, ca_filename: str | None = None, - session: ClientSession = None, - json_serialiser=None + asynchronous: bool = False, ): """ Construct a client for the Evolve App Server. If the server is HTTPS, authentication may be configured. @@ -61,10 +59,9 @@ def __init__( HTTP/HTTPS parameters: :param verify_certificate: Set this to "False" to disable certificate verification. :param ca_filename: Path to CA file to use for verification. (Optional - by default will use system certs) - :param session: aiohttp ClientSession to use, if not provided a new session will be created for you. You should - typically only use one aiohttp session per application. - :param json_serialiser: JSON serialiser to use for requests e.g. ujson.dumps. (Defaults to json.dumps) + :param asynchronous: all functions will be returned as ``Coroutine``s if True, or ran in an existing event loop if False """ + self._asynchronous = asynchronous self._protocol = protocol self._host = host self._port = port @@ -93,46 +90,46 @@ def close(self): async def aclose(self): # FIXME: __axeit__ ? 
return - def get_work_package_cost_estimation(self, work_package: WorkPackageInput): + @async_func + @catch_warnings + async def get_work_package_cost_estimation(self, work_package: WorkPackageInput): """ Send request to hosting capacity service to get an estimate cost of supplied work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_work_package_cost_estimation(work_package), - operation_name="getWorkPackageCostEstimation", - ) + return await self._gql_client.query( + Query.get_work_package_cost_estimation(work_package), + operation_name="getWorkPackageCostEstimation", ) - def run_hosting_capacity_work_package(self, work_package: WorkPackageInput, work_package_name: str): + @async_func + @catch_warnings + async def run_hosting_capacity_work_package(self, work_package: WorkPackageInput, work_package_name: str): """ Send request to hosting capacity service to run work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.run_work_package(work_package, work_package_name=work_package_name), - operation_name="runWorkPackage", - ) + return await self._gql_client.mutation( + Mutation.run_work_package(work_package, work_package_name=work_package_name), + operation_name="runWorkPackage", ) - def cancel_hosting_capacity_work_package(self, work_package_id: str): + @async_func + @catch_warnings + async def cancel_hosting_capacity_work_package(self, work_package_id: str): """ Send request to hosting capacity service to cancel a running work 
package :param work_package_id: The id of the running work package to cancel :return: The HTTP response received from the Evolve App Server after attempting to cancel work package """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.cancel_work_package(work_package_id=work_package_id), - operation_name="cancelWorkPackage" - ) + return await self._gql_client.mutation( + Mutation.cancel_work_package(work_package_id=work_package_id), + operation_name="cancelWorkPackage" ) def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this info not returned by get_work_package_by_id ? @@ -149,21 +146,23 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this inf ) ) - def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): + @async_func + @catch_warnings + async def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): """ Send request to evolve app server to run a feeder load analysis study :param feeder_load_analysis_input:: An instance of the `FeederLoadAnalysisConfig` data class representing the configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.run_feeder_load_analysis(feeder_load_analysis_input), - operation_name="runFeederLoadAnalysisReport" - ) + return await self._gql_client.mutation( + Mutation.run_feeder_load_analysis(feeder_load_analysis_input), + operation_name="runFeederLoadAnalysisReport" ) - def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): + @async_func + @catch_warnings + async def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): """ Send request to evolve app server to retrieve a feeder load analysis report status @@ -171,84 +170,84 @@ def 
get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool :param full_spec: If true the response will include the request sent to generate the report :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( - FeederLoadAnalysisReportFields.id, - FeederLoadAnalysisReportFields.name, - FeederLoadAnalysisReportFields.created_at, - FeederLoadAnalysisReportFields.created_by, - FeederLoadAnalysisReportFields.completed_at, - FeederLoadAnalysisReportFields.state, - FeederLoadAnalysisReportFields.errors, - FeederLoadAnalysisReportFields.generation_spec().fields( - FeederLoadAnalysisSpecFields.feeders, - FeederLoadAnalysisSpecFields.substations, - FeederLoadAnalysisSpecFields.sub_geographical_regions, - FeederLoadAnalysisSpecFields.geographical_regions, - FeederLoadAnalysisSpecFields.start_date, - FeederLoadAnalysisSpecFields.end_date, - FeederLoadAnalysisSpecFields.fetch_lv_network, - FeederLoadAnalysisSpecFields.process_feeder_loads, - FeederLoadAnalysisSpecFields.process_coincident_loads, - FeederLoadAnalysisSpecFields.produce_basic_report, - FeederLoadAnalysisSpecFields.produce_conductor_report, - FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, - FeederLoadAnalysisSpecFields.output - ), + return await self._gql_client.query( + Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( + FeederLoadAnalysisReportFields.id, + FeederLoadAnalysisReportFields.name, + FeederLoadAnalysisReportFields.created_at, + FeederLoadAnalysisReportFields.created_by, + FeederLoadAnalysisReportFields.completed_at, + FeederLoadAnalysisReportFields.state, + FeederLoadAnalysisReportFields.errors, + FeederLoadAnalysisReportFields.generation_spec().fields( + FeederLoadAnalysisSpecFields.feeders, + 
FeederLoadAnalysisSpecFields.substations, + FeederLoadAnalysisSpecFields.sub_geographical_regions, + FeederLoadAnalysisSpecFields.geographical_regions, + FeederLoadAnalysisSpecFields.start_date, + FeederLoadAnalysisSpecFields.end_date, + FeederLoadAnalysisSpecFields.fetch_lv_network, + FeederLoadAnalysisSpecFields.process_feeder_loads, + FeederLoadAnalysisSpecFields.process_coincident_loads, + FeederLoadAnalysisSpecFields.produce_basic_report, + FeederLoadAnalysisSpecFields.produce_conductor_report, + FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, + FeederLoadAnalysisSpecFields.output ), - operation_name="getFeederLoadAnalysisReportStatus", - ) + ), + operation_name="getFeederLoadAnalysisReportStatus", ) - def upload_study(self, study: StudyInput | list[StudyInput]): + @async_func + @catch_warnings + async def upload_study(self, study: StudyInput | list[StudyInput]): """ Uploads a new study to the Evolve App Server :param study: An instance of a data class representing a new study """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.add_studies(study if isinstance(study, list) else [study]), - operation_name="addStudy", - ) + return await self._gql_client.mutation( + Mutation.add_studies(study if isinstance(study, list) else [study]), + operation_name="addStudy", ) - def run_ingestor(self, run_config: list[IngestorConfigInput]): + @async_func + @catch_warnings + async def run_ingestor(self, run_config: list[IngestorConfigInput]): """ Send request to perform an ingestor run :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.execute_ingestor(run_config=run_config), - operation_name="executeIngestor", - ) + return await self._gql_client.mutation( + Mutation.execute_ingestor(run_config=run_config), + operation_name="executeIngestor", ) - 
def get_ingestor_run(self, ingestor_run_id: int): + @async_func + @catch_warnings + async def get_ingestor_run(self, ingestor_run_id: int): """ Send request to retrieve the record of a particular ingestor run. :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_ingestor_run(ingestor_run_id).fields( - IngestionRunFields.id, - IngestionRunFields.container_runtime_type, - IngestionRunFields.payload, - IngestionRunFields.token, - IngestionRunFields.status, - IngestionRunFields.started_at, - IngestionRunFields.status_last_updated_at, - IngestionRunFields.completed_at, - ), - operation_name="getIngestorRun", - ) + return await self._gql_client.query( + Query.get_ingestor_run(ingestor_run_id).fields( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, + ), + operation_name="getIngestorRun", ) - def get_ingestor_run_list( + @async_func + @catch_warnings + async def get_ingestor_run_list( self, query_filter: IngestorRunsFilterInput | None = None, query_sort: IngestorRunsSortCriteriaInput | None = None @@ -260,23 +259,23 @@ def get_ingestor_run_list( :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. 
""" - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( - IngestionRunFields.id, - IngestionRunFields.container_runtime_type, - IngestionRunFields.payload, - IngestionRunFields.token, - IngestionRunFields.status, - IngestionRunFields.started_at, - IngestionRunFields.status_last_updated_at, - IngestionRunFields.completed_at, - ), - operation_name="listIngestorRuns", - ) + return await self._gql_client.query( + Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( + IngestionRunFields.id, + IngestionRunFields.container_runtime_type, + IngestionRunFields.payload, + IngestionRunFields.token, + IngestionRunFields.status, + IngestionRunFields.started_at, + IngestionRunFields.status_last_updated_at, + IngestionRunFields.completed_at, + ), + operation_name="listIngestorRuns", ) - def run_hosting_capacity_calibration( + @async_func + @catch_warnings + async def run_hosting_capacity_calibration( self, calibration_name: str, local_calibration_time: datetime, @@ -304,55 +303,55 @@ def run_hosting_capacity_calibration( if generator_config.model: generator_config.model.transformer_tap_settings = transformer_tap_settings - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.run_calibration( - calibration_name=calibration_name, - calibration_time_local=local_calibration_time, - feeders=feeders, - generator_config=generator_config, - ), - operation_name="runCalibration", - ) + return await self._gql_client.mutation( + Mutation.run_calibration( + calibration_name=calibration_name, + calibration_time_local=local_calibration_time, + feeders=feeders, + generator_config=generator_config, + ), + operation_name="runCalibration", ) - def get_hosting_capacity_calibration_run(self, id: str): + @async_func + @catch_warnings + async def get_hosting_capacity_calibration_run(self, id: str): """ Retrieve information of a hosting capacity calibration run 
:param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_calibration_run(id).fields( - HcCalibrationFields.id, - HcCalibrationFields.name, - HcCalibrationFields.workflow_id, - HcCalibrationFields.run_id, - HcCalibrationFields.calibration_time_local, - HcCalibrationFields.start_at, - HcCalibrationFields.completed_at, - HcCalibrationFields.status, - HcCalibrationFields.feeders, - HcCalibrationFields.calibration_work_package_config, - ), - operation_name="getCalibrationRun", - ) + return await self._gql_client.query( + Query.get_calibration_run(id).fields( + HcCalibrationFields.id, + HcCalibrationFields.name, + HcCalibrationFields.workflow_id, + HcCalibrationFields.run_id, + HcCalibrationFields.calibration_time_local, + HcCalibrationFields.start_at, + HcCalibrationFields.completed_at, + HcCalibrationFields.status, + HcCalibrationFields.feeders, + HcCalibrationFields.calibration_work_package_config, + ), + operation_name="getCalibrationRun", ) - def get_hosting_capacity_calibration_sets(self): + @async_func + @catch_warnings + async def get_hosting_capacity_calibration_sets(self): """ Retrieve a list of all completed calibration runs initiated through Evolve App Server :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_calibration_sets(), - operation_name="getCalibrationSets", - ) + return await self._gql_client.query( + Query.get_calibration_sets(), + operation_name="getCalibrationSets", ) - def get_transformer_tap_settings( + @async_func + @catch_warnings + async def get_transformer_tap_settings( self, calibration_name: str, feeder: str | None = None, @@ -365,39 +364,39 @@ def get_transformer_tap_settings( :param transformer_mrid: An optional filter to return 
only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_transformer_tap_settings( - calibration_name=calibration_name, - feeder=feeder, - transformer_mrid=transformer_mrid - ).fields( - GqlTxTapRecordFields.id, - GqlTxTapRecordFields.high_step, - GqlTxTapRecordFields.low_step, - GqlTxTapRecordFields.nominal_tap_num, - GqlTxTapRecordFields.tap_position, - GqlTxTapRecordFields.control_enabled, - GqlTxTapRecordFields.step_voltage_increment, - ), - operation_name="getTransformerTapSettings", - ) + return await self._gql_client.query( + Query.get_transformer_tap_settings( + calibration_name=calibration_name, + feeder=feeder, + transformer_mrid=transformer_mrid + ).fields( + GqlTxTapRecordFields.id, + GqlTxTapRecordFields.high_step, + GqlTxTapRecordFields.low_step, + GqlTxTapRecordFields.nominal_tap_num, + GqlTxTapRecordFields.tap_position, + GqlTxTapRecordFields.control_enabled, + GqlTxTapRecordFields.step_voltage_increment, + ), + operation_name="getTransformerTapSettings", ) - def run_opendss_export(self, config: OpenDssModelInput): + @async_func + @catch_warnings + async def run_opendss_export(self, config: OpenDssModelInput): """ Send request to run an opendss export :param config: The OpenDssConfig for running the export :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export """ - return get_event_loop().run_until_complete( - self._gql_client.mutation( - Mutation.create_open_dss_model(config), - operation_name="createOpenDssModel", - ) + return await self._gql_client.mutation( + Mutation.create_open_dss_model(config), + operation_name="createOpenDssModel", ) - def get_paged_opendss_models( + @async_func + @catch_warnings + async def get_paged_opendss_models( self, limit: int | None = None, 
offset: int | None = None, @@ -411,40 +410,32 @@ def get_paged_opendss_models( :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - return get_event_loop().run_until_complete( - self._gql_client.query( - Query.paged_open_dss_models( - limit=limit, - offset=offset, - filter_=query_filter, - sort=query_sort - ).fields( - OpenDssModelPageFields.total_count, - OpenDssModelPageFields.offset, - OpenDssModelPageFields.models().fields( - OpenDssModelFields.id, - OpenDssModelFields.name, - OpenDssModelFields.created_at, - OpenDssModelFields.state, - OpenDssModelFields.download_url, - OpenDssModelFields.is_public, - OpenDssModelFields.errors, - OpenDssModelFields.generation_spec - ), + return await self._gql_client.query( + Query.paged_open_dss_models( + limit=limit, + offset=offset, + filter_=query_filter, + sort=query_sort + ).fields( + OpenDssModelPageFields.total_count, + OpenDssModelPageFields.offset, + OpenDssModelPageFields.models().fields( + OpenDssModelFields.id, + OpenDssModelFields.name, + OpenDssModelFields.created_at, + OpenDssModelFields.state, + OpenDssModelFields.download_url, + OpenDssModelFields.is_public, + OpenDssModelFields.errors, + OpenDssModelFields.generation_spec ), - operation_name="pagedOpenDssModels", - ) + ), + operation_name="pagedOpenDssModels", ) - def get_opendss_model_download_url(self, run_id: int): - """ - Retrieve a download url for the specified opendss export run id - :param run_id: The opendss export run ID - :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url - """ - return get_event_loop().run_until_complete(self.async_get_opendss_model_download_url(run_id)) - - async def async_get_opendss_model_download_url(self, run_id: int): + @async_func + @catch_warnings + async def get_opendss_model_download_url(self, run_id: int): """ Retrieve a download url for the 
specified opendss export run id :param run_id: The opendss export run ID @@ -460,15 +451,9 @@ async def async_get_opendss_model_download_url(self, run_id: int): elif not response.ok: response.raise_for_status() - def get_opendss_model(self, model_id: int): - """ - Retrieve information of a OpenDss model export - :param model_id: The OpenDss model export ID - :return: The HTTP response received from the Evolve App Server after requesting the openDss model info - """ - return get_event_loop().run_until_complete(self.async_get_opendss_model(model_id)) - - async def async_get_opendss_model(self, model_id: int): + @async_func + @catch_warnings + async def get_opendss_model(self, model_id: int): """ Retrieve information of a OpenDss model export :param model_id: The OpenDss model export ID @@ -479,7 +464,7 @@ async def async_get_opendss_model(self, model_id: int): page_size = 20 while True: - response = self.get_paged_opendss_models(page_size, offset) + response = await self.get_paged_opendss_models(page_size, offset) total_count = int(response["data"]["pagedOpenDssModels"]["totalCount"]) page_count = len(response["data"]["pagedOpenDssModels"]["models"]) for model in response["data"]["pagedOpenDssModels"]["models"]: diff --git a/src/zepben/eas/client/enums.py b/src/zepben/eas/client/enums.py index 0499f03..e88816f 100644 --- a/src/zepben/eas/client/enums.py +++ b/src/zepben/eas/client/enums.py @@ -8,6 +8,10 @@ from enum import Enum +__doc__ = """ + This file should ONLY contain enums that the gql generator misses. + Ideally it should be non existent. +""" class OpenDssModelState(Enum): COULD_NOT_START = 'COULD_NOT_START' diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index 4a7146f..911cc9f 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -3,3 +3,28 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. +import asyncio + +from zepben.eas import EasClient + + +def test_can_connect_to_local_eas_non_async(): + client = EasClient( + host="localhost", + port=7654, + protocol="http", + verify_certificate=False, + asynchronous=False, + ) + assert client.get_ingestor_run_list() == {'data': {'listIngestorRuns': []}} + + +def test_can_connect_to_local_eas_async(): + client = EasClient( + host="localhost", + port=7654, + protocol="http", + verify_certificate=False, + asynchronous=True + ) + assert asyncio.run(client.get_ingestor_run_list()) == {'data': {'listIngestorRuns': []}} From 73ea959f9adaa890a8791a7f315a13d0c7f72078 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 20:15:39 +1100 Subject: [PATCH 07/32] update readme, make constructing objects not suck by importing the generated client into zepben.eas Signed-off-by: Max Chesterfield --- README.md | 94 ++++++++++++++--------------- src/zepben/eas/__init__.py | 2 + src/zepben/eas/client/eas_client.py | 4 +- 3 files changed, 49 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index 4af07ab..b6a04d2 100644 --- a/README.md +++ b/README.md @@ -7,43 +7,42 @@ the Evolve App Server and upload studies. ```python from geojson import FeatureCollection -from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay +from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType eas_client = EasClient( host="", port=1234, access_token="", - client_id="", - username="", - password="", - client_secret="" + asynchronous=False, ) eas_client.upload_study( - Study( + StudyInput( name="", description="", tags=["", ""], results=[ - Result( + StudyResultInput( name="", - geo_json_overlay=GeoJsonOverlay( - data=FeatureCollection( ... 
), + geoJsonOverlay=GeoJsonOverlayInput( + data=FeatureCollection(...), styles=["style1"] ), - sections=Section( - type="TABLE", - name="", - description = "
", - columns=[ - { "key": "", "name": "" }, - { "key": "", "name": "" }, - ], - data=[ - { "": "", "": "" }, - { "": "", "": "" } - ] - ) + sections=[ + ResultSectionInput( + type=SectionType.TABLE, + name="
", + description="
", + columns=[ + {"key": "", "name": ""}, + {"key": "", "name": ""}, + ], + data=[ + {"": "", "": ""}, + {"": "", "": ""} + ] + ) + ] ) ], styles=[ @@ -59,52 +58,49 @@ eas_client.close() ``` ## AsyncIO ## -Asyncio is also supported using aiohttp. A session will be created for you when you create an EasClient if not provided via the `session` parameter to EasClient. - -To use the asyncio API use `async_upload_study` like so: +The EasClient can operate in async mode if specified, like so: ```python from aiohttp import ClientSession from geojson import FeatureCollection -from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay +from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType + async def upload(): eas_client = EasClient( host="", port=1234, access_token="", - client_id="", - username="", - password="", - client_secret="", - session=ClientSession(...) + asynchronous=True, # returns all methods as plain async methods ) - await eas_client.async_upload_study( - Study( + await eas_client.upload_study( + StudyInput( name="", description="", tags=["", ""], results=[ - Result( + StudyResultInput( name="", - geo_json_overlay=GeoJsonOverlay( - data=FeatureCollection( ... ), + geoJsonOverlay=GeoJsonOverlayInput( + data=FeatureCollection(...), styles=["style1"] ), - sections=Section( - type="TABLE", - name="
", - description = "
", - columns=[ - { "key": "", "name": "" }, - { "key": "", "name": "" }, - ], - data=[ - { "": "", "": "" }, - { "": "", "": "" } - ] - ) + sections=[ + ResultSectionInput( + type=SectionType.TABLE, + name="
", + description="
", + columns=[ + {"key": "", "name": ""}, + {"key": "", "name": ""}, + ], + data=[ + {"": "", "": ""}, + {"": "", "": ""} + ] + ) + ] ) ], styles=[ diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index 71a7abc..b9445f2 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -7,3 +7,5 @@ from zepben.eas.client.eas_client import * from zepben.eas.client.enums import * + +from zepben.eas.lib.generated_graphql_client import * diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index c27988f..f384c46 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -87,8 +87,8 @@ def __init__( def close(self): return get_event_loop().run_until_complete(self.aclose()) - async def aclose(self): # FIXME: __axeit__ ? - return + async def aclose(self): + await self._gql_client.http_client.aclose() @async_func @catch_warnings From 6262507ffecf0b6833dc2df40e325af93aa0dfda Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 4 Mar 2026 20:19:18 +1100 Subject: [PATCH 08/32] allowing args for the client is asking for breaking changes. 
Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index f384c46..6c2ca12 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -33,6 +33,7 @@ class EasClient: def __init__( self, + *, host: str, port: int, protocol: str = "https", From 77e1173a48145e52b1082ae844f261354defa5ca Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 10 Mar 2026 17:27:27 +1100 Subject: [PATCH 09/32] do some docs Signed-off-by: Max Chesterfield --- pyproject.toml | 10 +- .../lib/generated_graphql_client/__init__.py | 2 - .../async_base_client.py | 2 - .../generated_graphql_client/base_model.py | 2 - .../base_operation.py | 2 - .../lib/generated_graphql_client/client.py | 2 - .../generated_graphql_client/custom_fields.py | 111 +++++++++++++++++- .../custom_mutations.py | 36 +++++- .../custom_queries.py | 64 +++++++++- .../custom_typing_fields.py | 2 - .../eas/lib/generated_graphql_client/enums.py | 3 - .../generated_graphql_client/exceptions.py | 2 - .../generated_graphql_client/input_types.py | 91 +++++++++++++- 13 files changed, 299 insertions(+), 30 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f71572e..bce4066 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,14 +51,18 @@ test = [ "trustme==0.9.0" ] eas-codegen = [ - "ariadne-codegen" + "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. 
] [tool.setuptools.packages.find] where = ["src/"] [tool.ariadne-codegen] -remote_schema_url = "http://127.0.0.1:7654/api/graphql" +remote_schema_url = "http://127.0.0.1:7654/api/graphql" # Set to address of Evolve App Server enable_custom_operations=true target_package_path='src/zepben/eas/lib' -target_package_name='generated_graphql_client' \ No newline at end of file +target_package_name='generated_graphql_client' +include_comments=false +introspection_descriptions=true +#introspection_schema_description=true +#introspection_directive_is_repeatable=true diff --git a/src/zepben/eas/lib/generated_graphql_client/__init__.py b/src/zepben/eas/lib/generated_graphql_client/__init__.py index 91b3738..24681c4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/__init__.py +++ b/src/zepben/eas/lib/generated_graphql_client/__init__.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from .async_base_client import AsyncBaseClient from .base_model import BaseModel, Upload from .client import Client diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py index 48e6914..9eb804a 100644 --- a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - import asyncio import enum import json diff --git a/src/zepben/eas/lib/generated_graphql_client/base_model.py b/src/zepben/eas/lib/generated_graphql_client/base_model.py index a93b416..68e2f9e 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_model.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_model.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from io import IOBase from pydantic import BaseModel as PydanticBaseModel diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py index 6433b07..65708d7 100644 
--- a/src/zepben/eas/lib/generated_graphql_client/base_operation.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from typing import Any, Optional, Union from graphql import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/client.py b/src/zepben/eas/lib/generated_graphql_client/client.py index 4e89027..bb11629 100644 --- a/src/zepben/eas/lib/generated_graphql_client/client.py +++ b/src/zepben/eas/lib/generated_graphql_client/client.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from typing import Any from graphql import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py index e691349..8ef01d9 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py @@ -1,10 +1,10 @@ -# Generated by ariadne-codegen - -from typing import Any, Union +from typing import Any, Optional, Union +from . 
import SerializationType from .base_operation import GraphQLField from .custom_typing_fields import ( AppOptionsGraphQLField, + ColumnGraphQLField, CoordinateGraphQLField, CustomerDetailsGraphQLField, CustomerDetailsResponseGraphQLField, @@ -52,6 +52,7 @@ ProcessedDiffGraphQLField, ProcessedDiffPageGraphQLField, RemoveAppOptionResultGraphQLField, + ResultSectionGraphQLField, ScenarioConfigurationGraphQLField, SincalConfigFileGraphQLField, SincalGlobalInputsConfigGraphQLField, @@ -64,6 +65,7 @@ StudyGraphQLField, StudyPageGraphQLField, StudyResultGraphQLField, + TableSectionGraphQLField, UploadUrlResponseGraphQLField, UserCustomerListColumnConfigGraphQLField, VariantGraphQLField, @@ -76,6 +78,8 @@ class AppOptionsFields(GraphQLField): + """Application configuration option.""" + asset_name_format: "AppOptionsGraphQLField" = AppOptionsGraphQLField( "assetNameFormat" ) @@ -93,6 +97,20 @@ def alias(self, alias: str) -> "AppOptionsFields": return self +class ColumnFields(GraphQLField): + key: "ColumnGraphQLField" = ColumnGraphQLField("key") + name: "ColumnGraphQLField" = ColumnGraphQLField("name") + + def fields(self, *subfields: ColumnGraphQLField) -> "ColumnFields": + """Subfields should come from the ColumnFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "ColumnFields": + self._alias = alias + return self + + class CoordinateFields(GraphQLField): latitude: "CoordinateGraphQLField" = CoordinateGraphQLField("latitude") longitude: "CoordinateGraphQLField" = CoordinateGraphQLField("longitude") @@ -108,6 +126,8 @@ def alias(self, alias: str) -> "CoordinateFields": class CustomerDetailsFields(GraphQLField): + """Detailed customer information including both customer-specific and network-specific fields.""" + customer_mrid: "CustomerDetailsGraphQLField" = CustomerDetailsGraphQLField( "customerMrid" ) @@ -200,12 +220,16 @@ def alias(self, alias: str) -> "CustomerDetailsResponseFields": class 
CustomerListColumnConfigFields(GraphQLField): + """Defines a column available for configuration in the customer list table.""" + column_name: "CustomerListColumnConfigGraphQLField" = ( CustomerListColumnConfigGraphQLField("columnName") ) + "The unique name of the column." group: "CustomerListColumnConfigGraphQLField" = ( CustomerListColumnConfigGraphQLField("group") ) + "The group this column belongs to (e.g., PII, NON_PII)." def fields( self, *subfields: CustomerListColumnConfigGraphQLField @@ -251,6 +275,8 @@ def alias(self, alias: str) -> "DurationCurveFields": class DurationCurveByTerminalFields(GraphQLField): + """The duration curve for a terminal of a conducting equipment.""" + @classmethod def duration_curve(cls) -> "DurationCurveFields": return DurationCurveFields("durationCurve") @@ -376,42 +402,55 @@ class FeederLoadAnalysisSpecFields(GraphQLField): aggregate_at_feeder_level: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("aggregateAtFeederLevel") ) + "Request for a report which aggregate all downstream load at the feeder level" end_date: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( "endDate" ) + "End date for this analysis" feeders: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( "feeders" ) + "The mRIDs of feeders to solve for feeder load analysis." fetch_lv_network: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("fetchLvNetwork") ) + "Whether to stop analysis at distribution transformer" geographical_regions: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("geographicalRegions") ) + "The mRIDs of Geographical Region to solve for feeder load analysis." 
output: "FeederLoadAnalysisSpecGraphQLField" = FeederLoadAnalysisSpecGraphQLField( "output" ) + "The file name of the resulting study" process_coincident_loads: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("processCoincidentLoads") ) + "Whether to include values corresponding to conductor event time points in the report" process_feeder_loads: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("processFeederLoads") ) + "Whether to include values corresponding to feeder event time points in the report" produce_basic_report: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("produceBasicReport") ) + "Request for a basic report" produce_conductor_report: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("produceConductorReport") ) + "Request for an extensive report" start_date: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("startDate") ) + "Start date for this analysis" sub_geographical_regions: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("subGeographicalRegions") ) + "The mRIDs of sub-Geographical Region to solve for feeder load analysis." substations: "FeederLoadAnalysisSpecGraphQLField" = ( FeederLoadAnalysisSpecGraphQLField("substations") ) + "The mRIDs of substations to solve for feeder load analysis." 
def fields( self, *subfields: FeederLoadAnalysisSpecGraphQLField @@ -661,6 +700,8 @@ def alias(self, alias: str) -> "HcCalibrationFields": class HcModelFields(GraphQLField): + """HC model representation""" + feeder: "HcModelGraphQLField" = HcModelGraphQLField("feeder") scenario: "HcModelGraphQLField" = HcModelGraphQLField("scenario") year: "HcModelGraphQLField" = HcModelGraphQLField("year") @@ -971,6 +1012,8 @@ def alias(self, alias: str) -> "OpenDssModelPageFields": class OpportunitiesByYearFields(GraphQLField): + """Opportunities available for a specific year.""" + @classmethod def available_opportunities(cls) -> "OpportunityFields": return OpportunityFields("availableOpportunities") @@ -1275,6 +1318,8 @@ def alias(self, alias: str) -> "ProcessedDiffPageFields": class RemoveAppOptionResultFields(GraphQLField): + """Result of removing an application option""" + name: "RemoveAppOptionResultGraphQLField" = RemoveAppOptionResultGraphQLField( "name" ) @@ -1294,6 +1339,26 @@ def alias(self, alias: str) -> "RemoveAppOptionResultFields": return self +class ResultSectionInterface(GraphQLField): + name: "ResultSectionGraphQLField" = ResultSectionGraphQLField("name") + type_: "ResultSectionGraphQLField" = ResultSectionGraphQLField("type") + id: "ResultSectionGraphQLField" = ResultSectionGraphQLField("id") + description: "ResultSectionGraphQLField" = ResultSectionGraphQLField("description") + + def fields(self, *subfields: ResultSectionGraphQLField) -> "ResultSectionInterface": + """Subfields should come from the ResultSectionInterface class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "ResultSectionInterface": + self._alias = alias + return self + + def on(self, type_name: str, *subfields: GraphQLField) -> "ResultSectionInterface": + self._inline_fragments[type_name] = subfields + return self + + class ScenarioConfigurationFields(GraphQLField): bess_allocation_id: "ScenarioConfigurationGraphQLField" = ( 
ScenarioConfigurationGraphQLField("bessAllocationId") @@ -1419,6 +1484,7 @@ class SincalModelFields(GraphQLField): @classmethod def generation_spec(cls) -> "SincalModelGenerationSpecFields": + """JSON exporter generation spec (auth tokens withheld)""" return SincalModelGenerationSpecFields("generationSpec") id: "SincalModelGraphQLField" = SincalModelGraphQLField("id") @@ -1443,6 +1509,7 @@ class SincalModelGenerationSpecFields(GraphQLField): config: "SincalModelGenerationSpecGraphQLField" = ( SincalModelGenerationSpecGraphQLField("config") ) + "JSON export config." equipment_container_mrids: "SincalModelGenerationSpecGraphQLField" = ( SincalModelGenerationSpecGraphQLField("equipmentContainerMrids") ) @@ -1643,6 +1710,41 @@ def alias(self, alias: str) -> "StudyResultFields": return self +class TableSectionFields(GraphQLField): + id: "TableSectionGraphQLField" = TableSectionGraphQLField("id") + name: "TableSectionGraphQLField" = TableSectionGraphQLField("name") + type_: "TableSectionGraphQLField" = TableSectionGraphQLField("type") + + @classmethod + def columns(cls) -> "ColumnFields": + return ColumnFields("columns") + + @classmethod + def data( + cls, *, serialization: Optional[SerializationType] = None + ) -> "TableSectionGraphQLField": + arguments: dict[str, dict[str, Any]] = { + "serialization": {"type": "SerializationType", "value": serialization} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return TableSectionGraphQLField("data", arguments=cleared_arguments) + + description: "TableSectionGraphQLField" = TableSectionGraphQLField("description") + + def fields( + self, *subfields: Union[TableSectionGraphQLField, "ColumnFields"] + ) -> "TableSectionFields": + """Subfields should come from the TableSectionFields class""" + self._subfields.extend(subfields) + return self + + def alias(self, alias: str) -> "TableSectionFields": + self._alias = alias + return self + + class 
UploadUrlResponseFields(GraphQLField): file_path: "UploadUrlResponseGraphQLField" = UploadUrlResponseGraphQLField( "filePath" @@ -1664,8 +1766,11 @@ def alias(self, alias: str) -> "UploadUrlResponseFields": class UserCustomerListColumnConfigFields(GraphQLField): + """User-specific column configuration for the customer list table.""" + @classmethod def columns(cls) -> "CustomerListColumnConfigFields": + """List of columns configured by the user to display in the customer list table.""" return CustomerListColumnConfigFields("columns") def fields( diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py index 54985d6..9abeee0 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from typing import Any, Optional from . import SincalFileType, VariantFileType @@ -27,6 +25,7 @@ class Mutation: @classmethod def add_studies(cls, studies: list[StudyInput]) -> GraphQLField: + """Add new studies to the database and return their IDs""" arguments: dict[str, dict[str, Any]] = { "studies": {"type": "[StudyInput!]!", "value": studies} } @@ -37,6 +36,7 @@ def add_studies(cls, studies: list[StudyInput]) -> GraphQLField: @classmethod def delete_studies(cls, ids: list[str]) -> GraphQLField: + """Delete studies by their IDs and return the IDs of deleted studies""" arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -45,6 +45,7 @@ def delete_studies(cls, ids: list[str]) -> GraphQLField: @classmethod def create_power_factory_model(cls, input: PowerFactoryModelInput) -> GraphQLField: + """Creates a new powerFactoryModel and returns its ID""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "PowerFactoryModelInput!", "value": 
input} } @@ -59,6 +60,7 @@ def create_power_factory_model(cls, input: PowerFactoryModelInput) -> GraphQLFie def create_power_factory_model_template( cls, input: PowerFactoryModelInput ) -> GraphQLField: + """Creates a new powerFactoryModel template and returns its ID""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "PowerFactoryModelInput!", "value": input} } @@ -71,6 +73,7 @@ def create_power_factory_model_template( @classmethod def delete_power_factory_model(cls, model_id: str) -> GraphQLField: + """Deletes powerFactoryModel with ID and returns said ID""" arguments: dict[str, dict[str, Any]] = { "modelId": {"type": "ID!", "value": model_id} } @@ -83,6 +86,7 @@ def delete_power_factory_model(cls, model_id: str) -> GraphQLField: @classmethod def delete_power_factory_model_template(cls, template_id: str) -> GraphQLField: + """Deletes powerFactoryModel template with ID and returns its ID""" arguments: dict[str, dict[str, Any]] = { "templateId": {"type": "ID!", "value": template_id} } @@ -97,6 +101,7 @@ def delete_power_factory_model_template(cls, template_id: str) -> GraphQLField: def update_power_factory_model_template( cls, template_id: str, generation_spec: PowerFactoryModelGenerationSpecInput ) -> GraphQLField: + """Updates powerFactoryModel template with ID and returns its ID""" arguments: dict[str, dict[str, Any]] = { "templateId": {"type": "ID!", "value": template_id}, "generationSpec": { @@ -113,6 +118,7 @@ def update_power_factory_model_template( @classmethod def cancel_work_package(cls, work_package_id: str) -> GraphQLField: + """Cancels a hosting capacity work package and returns its ID.""" arguments: dict[str, dict[str, Any]] = { "workPackageId": {"type": "ID!", "value": work_package_id} } @@ -123,6 +129,7 @@ def cancel_work_package(cls, work_package_id: str) -> GraphQLField: @classmethod def delete_work_package(cls, work_package_ids: list[str]) -> GraphQLField: + """Delete one or more hosting capacity work package(s). 
Returns the list of successfully deleted work package IDs, throws if none can be deleted.""" arguments: dict[str, dict[str, Any]] = { "workPackageIds": {"type": "[String!]!", "value": work_package_ids} } @@ -139,6 +146,7 @@ def edit_diff_package( name: Optional[str] = None, description: Optional[str] = None ) -> GraphQLField: + """Edits a hosting capacity diff package and return boolean. Returns "true" on successful update""" arguments: dict[str, dict[str, Any]] = { "diffId": {"type": "ID!", "value": diff_id}, "name": {"type": "String", "value": name}, @@ -157,6 +165,7 @@ def edit_work_package( name: Optional[str] = None, description: Optional[str] = None ) -> GraphQLField: + """Edits a hosting capacity work package and return boolean. Returns "true" on successful update""" arguments: dict[str, dict[str, Any]] = { "workPackageId": {"type": "ID!", "value": work_package_id}, "name": {"type": "String", "value": name}, @@ -181,6 +190,7 @@ def generate_enhanced_network_performance_diff( season: Optional[str] = None, time_of_day: Optional[str] = None ) -> DiffResultFields: + """Generate and store the differences of enhanced network performance metrics between two work packages and returns the number of entries generated with the ID of this diff package.""" arguments: dict[str, dict[str, Any]] = { "diffId": {"type": "ID!", "value": diff_id}, "diffName": {"type": "String", "value": diff_name}, @@ -212,6 +222,7 @@ def generate_network_performance_diff( feeder: Optional[str] = None, year: Optional[int] = None ) -> DiffResultFields: + """Generate and store the differences of network performance metrics between two work packages and returns the number of entries generated with the ID of this diff package.""" arguments: dict[str, dict[str, Any]] = { "diffId": {"type": "ID!", "value": diff_id}, "diffName": {"type": "String", "value": diff_name}, @@ -230,6 +241,7 @@ def generate_network_performance_diff( @classmethod def process_input_database(cls, file_path: str) -> 
GraphQLField: + """Processes the input database specified by the given filepath.""" arguments: dict[str, dict[str, Any]] = { "filePath": {"type": "String!", "value": file_path} } @@ -249,6 +261,7 @@ def run_calibration( feeders: Optional[list[str]] = None, generator_config: Optional[HcGeneratorConfigInput] = None ) -> GraphQLField: + """Runs a calibration and returns a run ID.""" arguments: dict[str, dict[str, Any]] = { "calibrationName": {"type": "String!", "value": calibration_name}, "calibrationTimeLocal": { @@ -270,6 +283,7 @@ def run_calibration( def run_work_package( cls, input: WorkPackageInput, work_package_name: str ) -> GraphQLField: + """Runs a hosting capacity work package and returns its ID.""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "WorkPackageInput!", "value": input}, "workPackageName": {"type": "String!", "value": work_package_name}, @@ -281,10 +295,12 @@ def run_work_package( @classmethod def lock_network_model_database(cls) -> GraphQLField: + """Lock EWB to the currently loaded network-model date.""" return GraphQLField(field_name="lockNetworkModelDatabase") @classmethod def switch_network_model_database(cls, date: str) -> GraphQLField: + """Lock EWB to the provided network-model date and reload the EWB server.""" arguments: dict[str, dict[str, Any]] = { "date": {"type": "String!", "value": date} } @@ -297,10 +313,12 @@ def switch_network_model_database(cls, date: str) -> GraphQLField: @classmethod def unlock_network_model_database(cls) -> GraphQLField: + """Unlock EWB network-model date (Note: This does not reload the EWB server).""" return GraphQLField(field_name="unlockNetworkModelDatabase") @classmethod def create_sincal_model(cls, input: SincalModelInput) -> GraphQLField: + """Launches Sincal Exporter with specified config. 
Returns ID of model.""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "SincalModelInput!", "value": input} } @@ -311,6 +329,7 @@ def create_sincal_model(cls, input: SincalModelInput) -> GraphQLField: @classmethod def create_sincal_model_preset(cls, input: SincalModelInput) -> GraphQLField: + """Creates a new sincalModel preset and returns its ID""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "SincalModelInput!", "value": input} } @@ -323,6 +342,7 @@ def create_sincal_model_preset(cls, input: SincalModelInput) -> GraphQLField: @classmethod def delete_sincal_model(cls, model_id: str) -> GraphQLField: + """Deletes sincalModel with ID and returns said ID""" arguments: dict[str, dict[str, Any]] = { "modelId": {"type": "ID!", "value": model_id} } @@ -333,6 +353,7 @@ def delete_sincal_model(cls, model_id: str) -> GraphQLField: @classmethod def delete_sincal_model_preset(cls, preset_id: str) -> GraphQLField: + """Deletes sincalModel preset with ID and returns its ID""" arguments: dict[str, dict[str, Any]] = { "presetId": {"type": "ID!", "value": preset_id} } @@ -347,6 +368,7 @@ def delete_sincal_model_preset(cls, preset_id: str) -> GraphQLField: def update_sincal_model_config_file_path( cls, file_path: str, file_type: SincalFileType ) -> GraphQLField: + """Updates the file path of one of the sincalModel input files. 
Returns true on success.""" arguments: dict[str, dict[str, Any]] = { "filePath": {"type": "String!", "value": file_path}, "fileType": {"type": "SincalFileType!", "value": file_type}, @@ -362,6 +384,7 @@ def update_sincal_model_config_file_path( def update_sincal_model_preset( cls, preset_id: str, generation_spec: SincalModelGenerationSpecInput ) -> GraphQLField: + """Updates sincalModel preset with ID and returns its ID""" arguments: dict[str, dict[str, Any]] = { "presetId": {"type": "ID!", "value": preset_id}, "generationSpec": { @@ -380,6 +403,7 @@ def update_sincal_model_preset( def execute_ingestor( cls, *, run_config: Optional[list[IngestorConfigInput]] = None ) -> GraphQLField: + """Start ingestor job.""" arguments: dict[str, dict[str, Any]] = { "runConfig": {"type": "[IngestorConfigInput!]", "value": run_config} } @@ -390,6 +414,7 @@ def execute_ingestor( @classmethod def run_feeder_load_analysis(cls, input: FeederLoadAnalysisInput) -> GraphQLField: + """Runs a feeder load analysis job.""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "FeederLoadAnalysisInput!", "value": input} } @@ -402,6 +427,7 @@ def run_feeder_load_analysis(cls, input: FeederLoadAnalysisInput) -> GraphQLFiel @classmethod def create_open_dss_model(cls, input: OpenDssModelInput) -> GraphQLField: + """Launches OpenDSS Exporter with specified config. 
Returns ID of model.""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "OpenDssModelInput!", "value": input} } @@ -414,6 +440,7 @@ def create_open_dss_model(cls, input: OpenDssModelInput) -> GraphQLField: @classmethod def delete_open_dss_model(cls, model_id: str) -> GraphQLField: + """Deletes openDSS model with ID and returns said ID""" arguments: dict[str, dict[str, Any]] = { "modelId": {"type": "ID!", "value": model_id} } @@ -428,6 +455,7 @@ def delete_open_dss_model(cls, model_id: str) -> GraphQLField: def save_user_customer_list_column_config( cls, columns: list[str] ) -> UserCustomerListColumnConfigFields: + """Update user's column configuration for the customer list to customize displayed columns.""" arguments: dict[str, dict[str, Any]] = { "columns": {"type": "[String!]!", "value": columns} } @@ -440,6 +468,7 @@ def save_user_customer_list_column_config( @classmethod def clear_app_option(cls, name: str) -> RemoveAppOptionResultFields: + """Reset an application option to its default value""" arguments: dict[str, dict[str, Any]] = { "name": {"type": "String!", "value": name} } @@ -452,6 +481,7 @@ def clear_app_option(cls, name: str) -> RemoveAppOptionResultFields: @classmethod def set_app_option(cls, app_options: AppOptionsInput) -> GraphQLField: + """Set an application option""" arguments: dict[str, dict[str, Any]] = { "appOptions": {"type": "AppOptionsInput!", "value": app_options} } @@ -464,6 +494,7 @@ def set_app_option(cls, app_options: AppOptionsInput) -> GraphQLField: def finalize_variant_processing( cls, variant_upload_id: str, submitted_variants: list[str] ) -> GraphQLField: + """Finalize variant processing with the specified file by supplying a list of finalized variants""" arguments: dict[str, dict[str, Any]] = { "variantUploadId": {"type": "String!", "value": variant_upload_id}, "submittedVariants": {"type": "[String!]!", "value": submitted_variants}, @@ -479,6 +510,7 @@ def finalize_variant_processing( def start_variant_processing( 
cls, prefix: str, file_type: VariantFileType ) -> GraphQLField: + """Start variant processing with the specified file. Returns an ID which can be used to track progress of the processing""" arguments: dict[str, dict[str, Any]] = { "prefix": {"type": "String!", "value": prefix}, "fileType": {"type": "VariantFileType!", "value": file_type}, diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index b760ef3..7ec7848 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from typing import Any, Optional from . import HostingCapacityFileType, WorkflowStatus, ContainerType, SincalFileType, VariantFileType @@ -80,6 +78,7 @@ def paged_studies( filter_: Optional[GetStudiesFilterInput] = None, sort: Optional[GetStudiesSortCriteriaInput] = None ) -> StudyPageFields: + """Retrieve a page of studies, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -93,6 +92,7 @@ def paged_studies( @classmethod def results_by_id(cls, ids: list[str]) -> StudyResultFields: + """Retrieve a list of results by IDs""" arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -101,6 +101,7 @@ def results_by_id(cls, ids: list[str]) -> StudyResultFields: @classmethod def studies(cls, *, filter_: Optional[GetStudiesFilterInput] = None) -> StudyFields: + """Retrieve a list of studies, with optional filtering""" arguments: dict[str, dict[str, Any]] = { "filter": {"type": "GetStudiesFilterInput", "value": filter_} } @@ -111,6 +112,7 @@ def studies(cls, *, filter_: Optional[GetStudiesFilterInput] = None) -> StudyFie @classmethod 
def studies_by_id(cls, ids: list[str]) -> StudyFields: + """Retrieve a list of studies by IDs""" arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -119,6 +121,7 @@ def studies_by_id(cls, ids: list[str]) -> StudyFields: @classmethod def styles_by_id(cls, ids: list[str]) -> GraphQLField: + """Retrieve a list of style layers by IDs""" arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -127,6 +130,7 @@ def styles_by_id(cls, ids: list[str]) -> GraphQLField: @classmethod def current_user(cls) -> GqlUserResponseFields: + """Get information about the current user""" return GqlUserResponseFields(field_name="currentUser") @classmethod @@ -138,6 +142,7 @@ def paged_power_factory_model_templates( filter_: Optional[GetPowerFactoryModelTemplatesFilterInput] = None, sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput] = None ) -> PowerFactoryModelTemplatePageFields: + """Retrieve a page of powerFactoryModel templates, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -166,6 +171,7 @@ def paged_power_factory_models( filter_: Optional[GetPowerFactoryModelsFilterInput] = None, sort: Optional[GetPowerFactoryModelsSortCriteriaInput] = None ) -> PowerFactoryModelPageFields: + """Retrieve a page of powerFactoryModels, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -181,6 +187,7 @@ def paged_power_factory_models( @classmethod def power_factory_model_by_id(cls, model_id: str) -> PowerFactoryModelFields: + """Retrieve a powerFactoryModel by 
ID""" arguments: dict[str, dict[str, Any]] = { "modelId": {"type": "ID!", "value": model_id} } @@ -195,6 +202,7 @@ def power_factory_model_by_id(cls, model_id: str) -> PowerFactoryModelFields: def power_factory_model_template_by_id( cls, template_id: str ) -> PowerFactoryModelTemplateFields: + """Retrieve a powerFactoryModel template by ID""" arguments: dict[str, dict[str, Any]] = { "templateId": {"type": "ID!", "value": template_id} } @@ -209,6 +217,7 @@ def power_factory_model_template_by_id( def power_factory_model_templates_by_ids( cls, template_ids: list[str] ) -> PowerFactoryModelTemplateFields: + """Retrieve a list of powerFactoryModel templates by IDs""" arguments: dict[str, dict[str, Any]] = { "templateIds": {"type": "[ID!]!", "value": template_ids} } @@ -223,6 +232,7 @@ def power_factory_model_templates_by_ids( def power_factory_models_by_ids( cls, model_ids: list[str] ) -> PowerFactoryModelFields: + """Retrieve a list of powerFactoryModels by IDs""" arguments: dict[str, dict[str, Any]] = { "modelIds": {"type": "[ID!]!", "value": model_ids} } @@ -235,14 +245,17 @@ def power_factory_models_by_ids( @classmethod def get_active_work_packages(cls) -> GraphQLField: + """Retrieve a list of currently active (running, scheduled, pending) work packages""" return GraphQLField(field_name="getActiveWorkPackages") @classmethod def get_all_work_packages_authors(cls) -> GqlUserFields: + """Retrieve all users that have created work packages.""" return GqlUserFields(field_name="getAllWorkPackagesAuthors") @classmethod def get_calibration_run(cls, id: str) -> HcCalibrationFields: + """Retrieve calibration run details by ID""" arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -253,6 +266,7 @@ def get_calibration_run(cls, id: str) -> HcCalibrationFields: @classmethod def get_calibration_sets(cls) -> GraphQLField: + """Retrieve available distribution 
transformer tap calibration sets.""" return GraphQLField(field_name="getCalibrationSets") @classmethod @@ -264,6 +278,7 @@ def get_duration_curves( year: int, conducting_equipment_mrid: str, ) -> DurationCurveByTerminalFields: + """Retrieve duration curves for a single piece of equipment in a specific SYF.""" arguments: dict[str, dict[str, Any]] = { "workPackageId": {"type": "String!", "value": work_package_id}, "scenario": {"type": "String!", "value": scenario}, @@ -285,6 +300,7 @@ def get_duration_curves( def get_opportunities( cls, *, year: Optional[int] = None ) -> OpportunitiesByYearFields: + """Retrieve all Opportunities available for a specific year.""" arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -295,6 +311,7 @@ def get_opportunities( @classmethod def get_opportunities_for_equipment(cls, m_rid: str) -> OpportunityFields: + """Retrieve Opportunities by attached conducting equipment mRID.""" arguments: dict[str, dict[str, Any]] = { "mRID": {"type": "String!", "value": m_rid} } @@ -307,6 +324,7 @@ def get_opportunities_for_equipment(cls, m_rid: str) -> OpportunityFields: @classmethod def get_opportunity(cls, id: str) -> OpportunityFields: + """Retrieve Opportunities by id.""" arguments: dict[str, dict[str, Any]] = {"id": {"type": "String!", "value": id}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -319,6 +337,7 @@ def get_opportunity(cls, id: str) -> OpportunityFields: def get_opportunity_locations( cls, *, year: Optional[int] = None ) -> OpportunityLocationFields: + """Retrieve all opportunity locations available for a specific year.""" arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -335,6 +354,7 @@ def get_scenario_configurations( 
offset: Optional[Any] = None, filter_: Optional[HcScenarioConfigsFilterInput] = None ) -> HcScenarioConfigsPageFields: + """Retrieve a page scenario configurations from the hosting capacity input database.""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -355,6 +375,7 @@ def get_transformer_tap_settings( feeder: Optional[str] = None, transformer_mrid: Optional[str] = None ) -> GqlTxTapRecordFields: + """Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database.""" arguments: dict[str, dict[str, Any]] = { "calibrationName": {"type": "String!", "value": calibration_name}, "feeder": {"type": "String", "value": feeder}, @@ -371,6 +392,7 @@ def get_transformer_tap_settings( def get_work_package_by_id( cls, id: str, *, with_groupings: Optional[bool] = None ) -> HcWorkPackageFields: + """Retrieve a hosting capacity work package by ID, withGroupings: Whether to include model groupings in the work package progress details, default value is false""" arguments: dict[str, dict[str, Any]] = { "id": {"type": "ID!", "value": id}, "withGroupings": {"type": "Boolean", "value": with_groupings}, @@ -384,6 +406,7 @@ def get_work_package_by_id( @classmethod def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> GraphQLField: + """Returns an estimated cost of the submitted hosting capacity work package.""" arguments: dict[str, dict[str, Any]] = { "input": {"type": "WorkPackageInput!", "value": input} } @@ -396,6 +419,7 @@ def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> GraphQLFie @classmethod def get_work_package_tree(cls, id: str) -> WorkPackageTreeFields: + """Retrieve a work package tree with its ancestors and immediate children.""" arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -414,6 
+438,7 @@ def get_work_packages( sort: Optional[HcWorkPackagesSortCriteriaInput] = None, with_groupings: Optional[bool] = None ) -> HcWorkPackagePageFields: + """Retrieve a page of hosting capacity work packages, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -432,6 +457,7 @@ def get_work_packages( def hosting_capacity_file_upload_url( cls, filename: str, file_type: HostingCapacityFileType ) -> UploadUrlResponseFields: + """Generate a pre-signed URL to upload hosting capacity file to the storage location. Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" arguments: dict[str, dict[str, Any]] = { "filename": {"type": "String!", "value": filename}, "fileType": {"type": "HostingCapacityFileType!", "value": file_type}, @@ -451,6 +477,7 @@ def list_calibration_runs( calibration_time: Optional[Any] = None, status: Optional[WorkflowStatus] = None ) -> HcCalibrationFields: + """Retrieve all calibration runs initiated through EAS""" arguments: dict[str, dict[str, Any]] = { "name": {"type": "String", "value": name}, "calibrationTime": {"type": "LocalDateTime", "value": calibration_time}, @@ -465,6 +492,7 @@ def list_calibration_runs( @classmethod def get_processed_diff(cls, diff_id: str) -> ProcessedDiffFields: + """Retrieve processed diff of hosting capacity work packages with diffId""" arguments: dict[str, dict[str, Any]] = { "diffId": {"type": "ID!", "value": diff_id} } @@ -484,6 +512,7 @@ def get_processed_diffs( filter_: Optional[ProcessedDiffFilterInput] = None, sort: Optional[ProcessedDiffSortCriteriaInput] = None ) -> ProcessedDiffPageFields: + """Retrieve a page of processed diffs, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -499,10 +528,12 @@ 
def get_processed_diffs( @classmethod def get_all_jobs(cls) -> IngestionJobFields: + """Gets the ID and metadata of all ingestion jobs in reverse chronological order.""" return IngestionJobFields(field_name="getAllJobs") @classmethod def get_distinct_metric_names(cls, job_id: str) -> GraphQLField: + """Gets all possible values of metricName for a specific job.""" arguments: dict[str, dict[str, Any]] = { "jobId": {"type": "String!", "value": job_id} } @@ -517,6 +548,7 @@ def get_distinct_metric_names(cls, job_id: str) -> GraphQLField: def get_metrics( cls, job_id: str, container_type: ContainerType, container_id: str ) -> MetricFields: + """Gets the metrics for a network container emitted in an ingestion job.""" arguments: dict[str, dict[str, Any]] = { "jobId": {"type": "String!", "value": job_id}, "containerType": {"type": "ContainerType!", "value": container_type}, @@ -529,10 +561,12 @@ def get_metrics( @classmethod def get_newest_job(cls) -> IngestionJobFields: + """Gets the ID and metadata of the newest ingestion job. If no job exists, this returns null.""" return IngestionJobFields(field_name="getNewestJob") @classmethod def get_sources(cls, job_id: str) -> JobSourceFields: + """Gets the data sources used in an ingestion job.""" arguments: dict[str, dict[str, Any]] = { "jobId": {"type": "String!", "value": job_id} } @@ -550,6 +584,7 @@ def paged_sincal_model_presets( filter_: Optional[GetSincalModelPresetsFilterInput] = None, sort: Optional[GetSincalModelPresetsSortCriteriaInput] = None ) -> SincalModelPresetPageFields: + """Retrieve a page of sincalModel presets, with optional limit and offset, and optional filtering. 
A default preset with null ID will also be included in the response, which may result in the number of presets returned exceeding the desired page size (limit).""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -572,6 +607,7 @@ def paged_sincal_models( filter_: Optional[GetSincalModelsFilterInput] = None, sort: Optional[GetSincalModelsSortCriteriaInput] = None ) -> SincalModelPageFields: + """Retrieve a page of sincalModels, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -587,6 +623,7 @@ def paged_sincal_models( @classmethod def sincal_model_by_id(cls, model_id: str) -> SincalModelFields: + """Retrieve a sincalModel by ID""" arguments: dict[str, dict[str, Any]] = { "modelId": {"type": "ID!", "value": model_id} } @@ -601,6 +638,7 @@ def sincal_model_by_id(cls, model_id: str) -> SincalModelFields: def sincal_model_config_upload_url( cls, filename: str, file_type: SincalFileType ) -> UploadUrlResponseFields: + """Generate a pre-signed URL to upload a sincal configuration file to the input storage location. Returns the pre-signed URL along with the final file path as it will be referenced by EAS. This does not update the sincal configuration. 
To make use of a newly uploaded configuration file, pass the `filePath` returned by this query to the `updateSincalModelConfigFilePath()` mutation.""" arguments: dict[str, dict[str, Any]] = { "filename": {"type": "String!", "value": filename}, "fileType": {"type": "SincalFileType!", "value": file_type}, @@ -614,10 +652,12 @@ def sincal_model_config_upload_url( @classmethod def sincal_model_global_config(cls) -> SincalGlobalInputsConfigFields: + """Retrieve the current sincalModel input file paths.""" return SincalGlobalInputsConfigFields(field_name="sincalModelGlobalConfig") @classmethod def sincal_model_preset_by_id(cls, preset_id: str) -> SincalModelPresetFields: + """Retrieve a sincalModel preset by ID""" arguments: dict[str, dict[str, Any]] = { "presetId": {"type": "ID!", "value": preset_id} } @@ -632,6 +672,7 @@ def sincal_model_preset_by_id(cls, preset_id: str) -> SincalModelPresetFields: def sincal_model_presets_by_ids( cls, preset_ids: list[str] ) -> SincalModelPresetFields: + """Retrieve a list of sincalModel presets by IDs""" arguments: dict[str, dict[str, Any]] = { "presetIds": {"type": "[ID!]!", "value": preset_ids} } @@ -644,6 +685,7 @@ def sincal_model_presets_by_ids( @classmethod def sincal_models_by_ids(cls, model_ids: list[str]) -> SincalModelFields: + """Retrieve a list of sincalModels by IDs""" arguments: dict[str, dict[str, Any]] = { "modelIds": {"type": "[ID!]!", "value": model_ids} } @@ -656,6 +698,7 @@ def sincal_models_by_ids(cls, model_ids: list[str]) -> SincalModelFields: @classmethod def create_machine_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: + """Create a new JWT auth token for a machine with the specified roles.""" arguments: dict[str, dict[str, Any]] = { "roles": {"type": "[String!]!", "value": roles}, "tokenName": {"type": "String!", "value": token_name}, @@ -669,6 +712,7 @@ def create_machine_api_key(cls, roles: list[str], token_name: str) -> GraphQLFie @classmethod def create_user_api_key(cls, roles: 
list[str], token_name: str) -> GraphQLField: + """Create the JWT auth token for the current user with specified roles.""" arguments: dict[str, dict[str, Any]] = { "roles": {"type": "[String!]!", "value": roles}, "tokenName": {"type": "String!", "value": token_name}, @@ -680,24 +724,29 @@ def create_user_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: @classmethod def get_machine_tokens(cls) -> MachineUserFields: + """Gets all machine token users with their username and display name.""" return MachineUserFields(field_name="getMachineTokens") @classmethod def get_public_geo_view_config(cls) -> GraphQLField: + """Retrieve the GeoViewConfig used to config the EWB public map tile endpoint. Returns NUll if not enabled.""" return GraphQLField(field_name="getPublicGeoViewConfig") @classmethod def get_all_external_roles(cls) -> GraphQLField: + """Get all external roles from EAS.""" return GraphQLField(field_name="getAllExternalRoles") @classmethod def get_network_models(cls) -> NetworkModelsFields: + """Get all EWB network models""" return NetworkModelsFields(field_name="getNetworkModels") @classmethod def get_feeder_load_analysis_report_status( cls, report_id: str, full_spec: bool ) -> FeederLoadAnalysisReportFields: + """Retrieve the status of a feeder load analysis job.""" arguments: dict[str, dict[str, Any]] = { "reportId": {"type": "ID!", "value": report_id}, "fullSpec": {"type": "Boolean!", "value": full_spec}, @@ -711,6 +760,7 @@ def get_feeder_load_analysis_report_status( @classmethod def get_ingestor_run(cls, id: int) -> IngestionRunFields: + """Retrieve ingestor run details by ID""" arguments: dict[str, dict[str, Any]] = {"id": {"type": "Int!", "value": id}} cleared_arguments = { key: value for key, value in arguments.items() if value["value"] is not None @@ -726,6 +776,7 @@ def list_ingestor_runs( filter_: Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None ) -> IngestionRunFields: + """Retrieve all 
ingestor runs initiated through EAS""" arguments: dict[str, dict[str, Any]] = { "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, @@ -746,6 +797,7 @@ def list_ingestor_runs_paged( filter_: Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None ) -> IngestorRunPageFields: + """Retrieve all ingestor runs initiated through EAS""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -768,6 +820,7 @@ def paged_open_dss_models( filter_: Optional[GetOpenDssModelsFilterInput] = None, sort: Optional[GetOpenDssModelsSortCriteriaInput] = None ) -> OpenDssModelPageFields: + """Retrieve a page of OpenDSS models, with optional limit and offset, and optional filtering""" arguments: dict[str, dict[str, Any]] = { "limit": {"type": "Int", "value": limit}, "offset": {"type": "Long", "value": offset}, @@ -785,6 +838,7 @@ def paged_open_dss_models( def get_user_permitted_customer_list_column_config( cls, ) -> UserCustomerListColumnConfigFields: + """Fetches the user permitted column configuration for the customer list view.""" return UserCustomerListColumnConfigFields( field_name="getUserPermittedCustomerListColumnConfig" ) @@ -793,12 +847,14 @@ def get_user_permitted_customer_list_column_config( def get_user_saved_customer_list_column_config( cls, ) -> UserCustomerListColumnConfigFields: + """Fetches the user's column configuration for the customer list view.""" return UserCustomerListColumnConfigFields( field_name="getUserSavedCustomerListColumnConfig" ) @classmethod def get_customer_list(cls, m_ri_ds: list[str]) -> CustomerDetailsResponseFields: + """Retrieve the list of customers and their details.""" arguments: dict[str, dict[str, Any]] = { "mRIDs": {"type": "[String!]!", "value": m_ri_ds} } @@ -813,6 +869,7 @@ def get_customer_list(cls, m_ri_ds: list[str]) -> 
CustomerDetailsResponseFields: def get_customer_list_by_nmis( cls, nmis: list[str] ) -> CustomerDetailsResponseFields: + """Retrieve customer details using NMIs as input.""" arguments: dict[str, dict[str, Any]] = { "nmis": {"type": "[String!]!", "value": nmis} } @@ -825,12 +882,14 @@ def get_customer_list_by_nmis( @classmethod def get_app_options(cls) -> AppOptionsFields: + """Get App Options""" return AppOptionsFields(field_name="getAppOptions") @classmethod def get_presigned_upload_url_for_variant( cls, filename: str, file_type: VariantFileType ) -> UploadUrlResponseFields: + """Generate a pre-signed URL to upload variant files to the cloud storage. Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" arguments: dict[str, dict[str, Any]] = { "filename": {"type": "String!", "value": filename}, "fileType": {"type": "VariantFileType!", "value": file_type}, @@ -844,6 +903,7 @@ def get_presigned_upload_url_for_variant( @classmethod def get_variant_upload_info(cls, job_id: str) -> VariantWorkPackageFields: + """Retrieves status of a variant ingestion workflow""" arguments: dict[str, dict[str, Any]] = { "jobID": {"type": "String!", "value": job_id} } diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py index bdee65d..3cd62f6 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from .base_operation import GraphQLField diff --git a/src/zepben/eas/lib/generated_graphql_client/enums.py b/src/zepben/eas/lib/generated_graphql_client/enums.py index 2d28db4..5a153a4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/enums.py +++ b/src/zepben/eas/lib/generated_graphql_client/enums.py @@ -1,6 +1,3 @@ -# Generated by ariadne-codegen -# Source: http://127.0.0.1:7654/api/graphql - from enum 
import Enum diff --git a/src/zepben/eas/lib/generated_graphql_client/exceptions.py b/src/zepben/eas/lib/generated_graphql_client/exceptions.py index 6dcc565..e217e9b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/exceptions.py +++ b/src/zepben/eas/lib/generated_graphql_client/exceptions.py @@ -1,5 +1,3 @@ -# Generated by ariadne-codegen - from typing import Any, Optional, Union import httpx diff --git a/src/zepben/eas/lib/generated_graphql_client/input_types.py b/src/zepben/eas/lib/generated_graphql_client/input_types.py index 3037e00..be71ecd 100644 --- a/src/zepben/eas/lib/generated_graphql_client/input_types.py +++ b/src/zepben/eas/lib/generated_graphql_client/input_types.py @@ -1,6 +1,3 @@ -# Generated by ariadne-codegen -# Source: http://127.0.0.1:7654/api/graphql - from typing import Any, Optional from pydantic import Field @@ -22,6 +19,8 @@ class AppOptionsInput(BaseModel): + """Input for updating application configuration options.""" + asset_name_format: Optional[str] = Field(alias="assetNameFormat", default=None) pole_string_format: Optional[str] = Field(alias="poleStringFormat", default=None) @@ -30,77 +29,115 @@ class CandidateGenerationConfigInput(BaseModel): average_voltage_spread_threshold: Optional[int] = Field( alias="averageVoltageSpreadThreshold", default=None ) + "The threshold for average line voltage spread under the transformer over the year, in volts. Voltage spread at each timestep is calculated by taking the difference between the maximum and minimum phase-to-phase voltage over the nodes under the transformer, for each phase, then taking the maximum of that difference across all phases. When the average voltage spread exceeds this threshold, it indicates that the transformer is experiencing a significant voltage swing that may impact system stability. Only used when `type` is `TAP_OPTIMIZATION`." 
intervention_criteria_name: Optional[str] = Field( alias="interventionCriteriaName", default=None ) + "The ID of the set of criteria used to select intervention candidates from enhanced metrics of the base work package run. Only used when `type` is `CRITERIA`." tap_weighting_factor_lower_threshold: Optional[float] = Field( alias="tapWeightingFactorLowerThreshold", default=None ) + "The minimum threshold for the tap weighting factor, used to determine when a positive tap adjustment (increasing voltage) is prioritized. If the tap weighting factor falls below this threshold, it indicates that the voltage is significantly under the desired range and requires corrective action. This setting is usually negative. Only used when `type` is `TAP_OPTIMIZATION`." tap_weighting_factor_upper_threshold: Optional[float] = Field( alias="tapWeightingFactorUpperThreshold", default=None ) + "The maximum threshold for the tap weighting factor, used to determine when a negative tap adjustment (decreasing voltage) is prioritized. If the tap weighting factor exceeds this threshold, it indicates that the voltage is significantly over the desired range and requires corrective action. This setting is usually positive. Only used when `type` is `TAP_OPTIMIZATION`." type_: CandidateGenerationType = Field(alias="type") + "The type of method for generating the intervention candidates." voltage_over_limit_hours_threshold: Optional[int] = Field( alias="voltageOverLimitHoursThreshold", default=None ) + "The threshold for number of hours a transformer is above the nominal voltage range. Only used when `type` is `TAP_OPTIMIZATION`." voltage_under_limit_hours_threshold: Optional[int] = Field( alias="voltageUnderLimitHoursThreshold", default=None ) + "The threshold for number of hours a transformer is below the nominal voltage range. Only used when `type` is `TAP_OPTIMIZATION`." class DvmsConfigInput(BaseModel): lower_limit: float = Field(alias="lowerLimit") + "The lower limit of voltage (p.u.) 
considered acceptable for the purposes of DVMS." lower_percentile: float = Field(alias="lowerPercentile") + "The lowest percentile of customers' voltages to consider when applying DVMS." max_iterations: int = Field(alias="maxIterations") + "The number of iterations to attempt DVMS for each timestep before moving on." regulator_config: "DvmsRegulatorConfigInput" = Field(alias="regulatorConfig") + "Configures the voltage regulator to apply if the zone is already satisfactory according to the above limits." upper_limit: float = Field(alias="upperLimit") + "The upper limit of voltage (p.u.) considered acceptable for the purposes of DVMS." upper_percentile: float = Field(alias="upperPercentile") + "The highest percentile of customers' voltages to consider when applying DVMS." class DvmsRegulatorConfigInput(BaseModel): allow_push_to_limit: bool = Field(alias="allowPushToLimit") + "If this is true, we allow the regulator to push some number of customers outside the specified limits for DVMS, with the limit of customers given by lowerPercentile and upperPercentile in DvmsConfig." max_tap_change_per_step: int = Field(alias="maxTapChangePerStep") + "The maximum number of tap steps to move (in either direction) for each timestep." pu_deadband_percent: float = Field(alias="puDeadbandPercent") + "Width of window of voltages considered acceptable for the average customer voltage, in %p.u." pu_target: float = Field(alias="puTarget") + "Voltage p.u. to move the average customer voltage towards." class FeederConfigInput(BaseModel): feeder: str + "The mRIDs of feeder to solve as part of this work package. Each feeder will be solved independently." fixed_time: Optional["FixedTimeInput"] = Field(alias="fixedTime", default=None) + "Fixed Time setting for load retrieval, can not be set with time period setting." scenarios: list[str] + "The IDs of scenarios to solve for." 
time_period: Optional["TimePeriodInput"] = Field(alias="timePeriod", default=None) + "Time period setting for load retrieval, can not be set with fixed time setting." years: list[int] + "The years to solve for. This is primarily used for fetching scenario data and calculating load growth." class FeederConfigsInput(BaseModel): configs: list["FeederConfigInput"] + "The mRIDs of feeders to solve for this work package. Each feeder will be solved independently." class FeederLoadAnalysisInput(BaseModel): aggregate_at_feeder_level: bool = Field(alias="aggregateAtFeederLevel") + "Request for a report which aggregate all downstream load at the feeder level" end_date: str = Field(alias="endDate") + "End date for this analysis" feeders: Optional[list[str]] = None + "The mRIDs of feeders to solve for feeder load analysis." fetch_lv_network: bool = Field(alias="fetchLvNetwork") + "Whether to stop analysis at distribution transformer" fla_forecast_config: Optional["FlaForecastConfigInput"] = Field( alias="flaForecastConfig", default=None ) + "Configuration for forecast FLA study" geographical_regions: Optional[list[str]] = Field( alias="geographicalRegions", default=None ) + "The mRIDs of Geographical Region to solve for feeder load analysis." output: Optional[str] = None + "The file name of the resulting study" process_coincident_loads: bool = Field(alias="processCoincidentLoads") + "Whether to include values corresponding to conductor event time points in the report" process_feeder_loads: bool = Field(alias="processFeederLoads") + "Whether to include values corresponding to feeder event time points in the report" produce_conductor_report: bool = Field(alias="produceConductorReport") + "Request for an extensive report" start_date: str = Field(alias="startDate") + "Start date for this analysis" sub_geographical_regions: Optional[list[str]] = Field( alias="subGeographicalRegions", default=None ) + "The mRIDs of sub-Geographical Region to solve for feeder load analysis." 
substations: Optional[list[str]] = None + "The mRIDs of substations to solve for feeder load analysis." class FixedTimeInput(BaseModel): load_time: Any = Field(alias="loadTime") + "The fixed time point to use for load retrieval." overrides: Optional[list["FixedTimeLoadOverrideInput"]] = None + "The list of load override profiles." class FixedTimeLoadOverrideInput(BaseModel): @@ -123,20 +160,30 @@ class FlaForecastConfigInput(BaseModel): bess_upgrade_threshold: Optional[int] = Field( alias="bessUpgradeThreshold", default=None ) + "Watts threshold to indicate if a customer site will gain additional battery during scenario application" pv_upgrade_threshold: Optional[int] = Field( alias="pvUpgradeThreshold", default=None ) + "Watts threshold to indicate if a customer site will gain additional pv during scenario application" scenario_id: str = Field(alias="scenarioID") + "The id of forecast scenario" seed: Optional[int] = None + "Seed for scenario application" year: int + "The year for forecast model" class ForecastConfigInput(BaseModel): feeders: list[str] + "The mRIDs of feeders to solve for this work package. Each feeder will be solved independently." fixed_time: Optional["FixedTimeInput"] = Field(alias="fixedTime", default=None) + "Fixed Time setting for load retrieval, can not be set with time period setting." scenarios: list[str] + "The IDs of scenarios to solve for." time_period: Optional["TimePeriodInput"] = Field(alias="timePeriod", default=None) + "Time period setting for load retrieval, can not be set with fixed time setting." years: list[int] + "The years to solve for. This is primarily used for fetching scenario data and calculating load growth." class GeoJsonOverlayInput(BaseModel): @@ -276,6 +323,7 @@ class HcEnhancedMetricsConfigInput(BaseModel): class HcExecutorConfigInput(BaseModel): value: Optional[str] = None + "Placeholder parameter, currently ignored." 
class HcGeneratorConfigInput(BaseModel): @@ -479,7 +527,9 @@ class HcResultProcessorConfigInput(BaseModel): class HcScenarioConfigsFilterInput(BaseModel): id: Optional[str] = None + "Search for scenario configurations by Id. Returns partial matches." name: Optional[str] = None + "Search for scenario configurations by name. Returns partial matches." class HcSolveConfigInput(BaseModel): @@ -513,9 +563,13 @@ class HcSwitchMeterPlacementConfigInput(BaseModel): class HcWorkPackagesFilterInput(BaseModel): created_by: Optional[list[str]] = Field(alias="createdBy", default=None) + "Search for work package by the username or email of the User that created the work package." id: Optional[str] = None + "Search for work package by Id." name: Optional[str] = None + "Search for work package by name. Returns partial matches." search_text: Optional[str] = Field(alias="searchText", default=None) + "Search for work package by user input text. Returns partial matches." class HcWorkPackagesSortCriteriaInput(BaseModel): @@ -542,12 +596,18 @@ class IngestorConfigInput(BaseModel): class IngestorRunsFilterInput(BaseModel): + """Include results based on filters. A logical AND is applied between the supplied filters""" + completed: Optional[bool] = None + "Filter results by whether they are in a completed state or not." container_runtime_type: Optional[list[IngestorRuntimeKind]] = Field( alias="containerRuntimeType", default=None ) + "Filter results by containerRunTimeType." id: Optional[str] = None + "Filter results by Id." status: Optional[list[IngestorRunState]] = None + "Filter results by the current status of the ingestor run." class IngestorRunsSortCriteriaInput(BaseModel): @@ -564,22 +624,31 @@ class IngestorRunsSortCriteriaInput(BaseModel): class InterventionConfigInput(BaseModel): allocation_criteria: Optional[str] = Field(alias="allocationCriteria", default=None) + "The ID of the set of criteria used to select an intervention instance for each candidate." 
allocation_limit_per_year: Optional[int] = Field( alias="allocationLimitPerYear", default=None ) + "The maximum number of interventions that can be applied per year. Defaults to 1 million." base_work_package_id: str = Field(alias="baseWorkPackageId") + "ID of the work package that this intervention is based on. The new work package should process a subset of its feeders, scenarios, and years." candidate_generation: Optional["CandidateGenerationConfigInput"] = Field( alias="candidateGeneration", default=None ) + "The method of generating candidates for the intervention. This does not need to be specified for certain interventions, e.g. PHASE_REBALANCING." dvms: Optional["DvmsConfigInput"] = None + "The config for DVMS. This must be specified if interventionType = DVMS." intervention_type: InterventionClass = Field(alias="interventionType") + "The class of intervention to apply." phase_rebalance_proportions: Optional["PhaseRebalanceProportionsInput"] = Field( alias="phaseRebalanceProportions", default=None ) + "The proportions to use for phase rebalancing. If this is unspecified and interventionType = PHASE_REBALANCING, phases will be rebalanced to equal proportions." specific_allocation_instance: Optional[str] = Field( alias="specificAllocationInstance", default=None ) + "The specific instance of intervention to use for every allocation. If this is unspecified, all instances of the intervention class will be considered when choosing one for each candidate." year_range: Optional["YearRangeInput"] = Field(alias="yearRange", default=None) + "The range of years to search for and apply interventions. All years within this range should be included in the work package. Defaults to 1AD to 9999AD." 
class OpenDssCommonConfigInput(BaseModel): @@ -641,7 +710,9 @@ class PowerFactoryModelInput(BaseModel): class ProcessedDiffFilterInput(BaseModel): type_: Optional[str] = Field(alias="type", default=None) + "Search for processed diffs by whether its network metrics or network metrics enhanced." w_p_id: Optional[str] = Field(alias="wPId", default=None) + "Search for processed diffs by work package Id." class ProcessedDiffSortCriteriaInput(BaseModel): @@ -666,7 +737,9 @@ class SincalModelGenerationSpecInput(BaseModel): forecast_spec: Optional["GqlSincalModelForecastSpecInput"] = Field( alias="forecastSpec", default=None ) + "Configuration for forecast models" frontend_config: str = Field(alias="frontendConfig") + "JSON frontend export config." class SincalModelInput(BaseModel): @@ -701,8 +774,11 @@ class StudyResultInput(BaseModel): class TimePeriodInput(BaseModel): end_time: Any = Field(alias="endTime") + "The ending time for load data retrieval." overrides: Optional[list["TimePeriodLoadOverrideInput"]] = None + "The list of load override profiles." start_time: Any = Field(alias="startTime") + "The starting time for load data retrieval." class TimePeriodLoadOverrideInput(BaseModel): @@ -725,27 +801,36 @@ class WorkPackageInput(BaseModel): executor_config: Optional["HcExecutorConfigInput"] = Field( alias="executorConfig", default=None ) + "Config exclusive to the OpenDSS executor." feeder_configs: Optional["FeederConfigsInput"] = Field( alias="feederConfigs", default=None ) + "The list of feeder configurations for this work package, can not be set if forecast configurations exists." forecast_config: Optional["ForecastConfigInput"] = Field( alias="forecastConfig", default=None ) + "The forecast configurations for this work package, can not be set if feeder configurations exists." generator_config: Optional["HcGeneratorConfigInput"] = Field( alias="generatorConfig", default=None ) + "Config exclusive to the OpenDSS model generator." 
intervention: Optional["InterventionConfigInput"] = None + "An optional intervention to use for this work package. Interventions are applied per feeder-scenario." quality_assurance_processing: Optional[bool] = Field( alias="qualityAssuranceProcessing", default=None ) + "Fetch load from a single timestamp. This will result in a single timestamp of results." result_processor_config: Optional["HcResultProcessorConfigInput"] = Field( alias="resultProcessorConfig", default=None ) + "Config exclusive to the result processor." class YearRangeInput(BaseModel): max_year: int = Field(alias="maxYear") + "The maximum year in this range (inclusive)" min_year: int = Field(alias="minYear") + "The minimum year in this range (inclusive)" DvmsConfigInput.model_rebuild() From 75e86995e73fa0e4bacb73716ff390309652571f Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 10 Mar 2026 17:29:59 +1100 Subject: [PATCH 10/32] narrow import path for some statements Signed-off-by: Max Chesterfield --- src/zepben/eas/lib/generated_graphql_client/custom_mutations.py | 2 +- src/zepben/eas/lib/generated_graphql_client/custom_queries.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py index 9abeee0..f08802d 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -1,6 +1,5 @@ from typing import Any, Optional -from . 
import SincalFileType, VariantFileType from .custom_fields import ( DiffResultFields, RemoveAppOptionResultFields, @@ -20,6 +19,7 @@ StudyInput, WorkPackageInput, ) +from .enums import SincalFileType, VariantFileType class Mutation: diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index 7ec7848..f8fea64 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,6 +1,5 @@ from typing import Any, Optional -from . import HostingCapacityFileType, WorkflowStatus, ContainerType, SincalFileType, VariantFileType from .custom_fields import ( AppOptionsFields, CustomerDetailsResponseFields, @@ -66,6 +65,7 @@ ProcessedDiffSortCriteriaInput, WorkPackageInput, ) +from .enums import HostingCapacityFileType, WorkflowStatus, ContainerType, SincalFileType, VariantFileType class Query: From 4dfdf65a7251d4238efe5568233fffa32fb9cff7 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 10 Mar 2026 17:30:18 +1100 Subject: [PATCH 11/32] pass kwargs to client. 
Signed-off-by: Max Chesterfield --- test/test_eas_client.py | 96 ++++++++++++++++++++--------------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 5d82a4c..999ff2d 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -59,8 +59,8 @@ def json(self): def test_create_eas_client_success(): eas_client = EasClient( - mock_host, - mock_port, + host=mock_host, + port=mock_port, protocol=mock_protocol, verify_certificate=mock_verify_certificate ) @@ -73,8 +73,8 @@ def test_create_eas_client_success(): def test_get_request_headers_adds_access_token_in_auth_header(): eas_client = EasClient( - mock_host, - mock_port, + host=mock_host, + port=mock_port, access_token=mock_access_token, ) @@ -106,8 +106,8 @@ def httpserver_ssl_context(localhost_cert): def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -191,8 +191,8 @@ def test_get_work_package_cost_estimation_valid_certificate_success(httpserver: def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -270,8 +270,8 @@ def test_run_hosting_capacity_work_package_valid_certificate_success(httpserver: def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -304,8 +304,8 @@ def test_cancel_hosting_capacity_work_package_valid_certificate_success(httpserv def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) 
@@ -338,8 +338,8 @@ def test_get_hosting_capacity_work_package_progress_valid_certificate_success(ht def test_upload_study_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -388,8 +388,8 @@ def hosting_capacity_run_calibration_request_handler(request): def test_run_hosting_capacity_calibration_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -432,8 +432,8 @@ def get_hosting_capacity_run_calibration_request_handler(request): def test_get_hosting_capacity_calibration_run_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -483,8 +483,8 @@ def hosting_capacity_run_calibration_with_calibration_time_request_handler(reque def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -505,8 +505,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings httpserver: HTTPServer ): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -547,8 +547,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings httpserver: HTTPServer ): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -589,8 +589,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings httpserver: HTTPServer ): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -608,8 +608,8 @@ def 
test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -638,8 +638,8 @@ def get_hosting_capacity_calibration_sets_request_handler(request): def test_get_hosting_capacity_calibration_sets_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -953,8 +953,8 @@ def run_opendss_export_request_handler(request): def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1022,8 +1022,8 @@ def get_paged_opendss_models_request_handler(request): def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1067,8 +1067,8 @@ def test_get_paged_opendss_models_valid_certificate_success(httpserver: HTTPServ def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1117,8 +1117,8 @@ def run_ingestor_request_handler(request): def test_run_ingestor_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1144,8 +1144,8 @@ def get_ingestor_run_request_handler(request): def test_get_ingestor_run_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1169,8 
+1169,8 @@ def get_ingestor_run_list_request_empty_handler(request): def test_get_ingestor_run_list_empty_filter_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1210,8 +1210,8 @@ def get_ingestor_run_list_request_complete_handler(request): def test_get_ingestor_run_list_all_filters_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, + host=LOCALHOST, + port=httpserver.port, verify_certificate=False ) @@ -1336,8 +1336,8 @@ def test_work_package_config_to_json_for_tap_optimization(): def _invalid_ca(port): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: return EasClient( - LOCALHOST, - port, + host=LOCALHOST, + port=port, verify_certificate=True, ca_filename=ca_filename ) @@ -1346,8 +1346,8 @@ def _invalid_ca(port): def _valid_ca(port, ca: trustme.CA): with ca.cert_pem.tempfile() as ca_filename: return EasClient( - LOCALHOST, - port, + host=LOCALHOST, + port=port, verify_certificate=True, ca_filename=ca_filename ) From c412aaaf33bebab97436dd7b0e367846ed323f73 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 17 Mar 2026 00:46:36 +1100 Subject: [PATCH 12/32] remove unnessecary param Signed-off-by: Max Chesterfield --- pyproject.toml | 5 +- src/zepben/eas/client/eas_client.py | 79 +++++++++++++++-------------- test/test_eas_client.py | 2 +- 3 files changed, 43 insertions(+), 43 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bce4066..36aa829 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,9 +60,8 @@ where = ["src/"] [tool.ariadne-codegen] remote_schema_url = "http://127.0.0.1:7654/api/graphql" # Set to address of Evolve App Server enable_custom_operations=true +include_comments=false target_package_path='src/zepben/eas/lib' target_package_name='generated_graphql_client' -include_comments=false introspection_descriptions=true 
-#introspection_schema_description=true -#introspection_directive_is_repeatable=true +introspection_input_value_deprecations=true diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 6c2ca12..b524eed 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -10,8 +10,10 @@ from asyncio import get_event_loop from datetime import datetime from http import HTTPStatus +from typing import Any import httpx +from graphql import OperationType from zepben.eas.client.decorators import async_func, catch_warnings from zepben.eas.client.patched_generated_client import PatchedClient as Client @@ -19,6 +21,7 @@ from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, \ HcModelConfigInput, OpenDssModelInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput +from zepben.eas.lib.generated_graphql_client.base_operation import GraphQLField from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisSpecFields, \ FeederLoadAnalysisReportFields, IngestionRunFields, HcCalibrationFields, GqlTxTapRecordFields, \ OpenDssModelPageFields, OpenDssModelFields @@ -26,7 +29,7 @@ from zepben.eas.lib.generated_graphql_client.custom_queries import Query -class EasClient: +class EasClient(Client): """ A class used to represent a client to the Evolve App Server, with methods that represent requests to its API. 
""" @@ -79,7 +82,7 @@ def __init__( headers=dict(authorization=f"Bearer {access_token}") if access_token else None, verify=verify, ) - self._gql_client = Client( + super().__init__( f"{self._base_url}/api/graphql", http_client=http_client, ) @@ -89,7 +92,22 @@ def close(self): return get_event_loop().run_until_complete(self.aclose()) async def aclose(self): - await self._gql_client.http_client.aclose() + await self.http_client.aclose() + + async def query(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: + """Execute a query against the Evolve App Server.""" + return await super().query(*fields, operation_name=operation_name) + + async def mutation(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: + """Execute a mutation against the Evolve App Server.""" + return await super().mutation(*fields, operation_name=operation_name) + + async def execute_custom_operation(self, *fields: GraphQLField, operation_type: OperationType, operation_name: str = None) -> dict[str, Any]: + return await super().execute_custom_operation( + *fields, + operation_type=operation_type, + operation_name=operation_name or '-'.join(f._field_name for f in fields) + ) @async_func @catch_warnings @@ -100,9 +118,8 @@ async def get_work_package_cost_estimation(self, work_package: WorkPackageInput) :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return await self._gql_client.query( + return await self.query( Query.get_work_package_cost_estimation(work_package), - operation_name="getWorkPackageCostEstimation", ) @async_func @@ -114,9 +131,8 @@ async def run_hosting_capacity_work_package(self, work_package: WorkPackageInput :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP 
response received from the Evolve App Server after attempting to run work package """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.run_work_package(work_package, work_package_name=work_package_name), - operation_name="runWorkPackage", ) @async_func @@ -128,9 +144,8 @@ async def cancel_hosting_capacity_work_package(self, work_package_id: str): :param work_package_id: The id of the running work package to cancel :return: The HTTP response received from the Evolve App Server after attempting to cancel work package """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.cancel_work_package(work_package_id=work_package_id), - operation_name="cancelWorkPackage" ) def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this info not returned by get_work_package_by_id ? @@ -139,11 +154,9 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this inf :return: The HTTP response received from the Evolve App Server after requesting work packages progress info """ - raise NotImplementedError() return get_event_loop().run_until_complete( - self._gql_client.query( - Query.get_work_packages(), - operation_name="getWorkPackagesProgres", + self.query( + Query.get_active_work_packages(), ) ) @@ -156,9 +169,8 @@ async def run_feeder_load_analysis_report(self, feeder_load_analysis_input: Feed :param feeder_load_analysis_input:: An instance of the `FeederLoadAnalysisConfig` data class representing the configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.run_feeder_load_analysis(feeder_load_analysis_input), - operation_name="runFeederLoadAnalysisReport" ) @async_func @@ -171,7 +183,7 @@ async def get_feeder_load_analysis_report_status(self, report_id: str, full_spec :param full_spec: If true the response will include the request sent 
to generate the report :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ - return await self._gql_client.query( + return await self.query( Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( FeederLoadAnalysisReportFields.id, FeederLoadAnalysisReportFields.name, @@ -196,7 +208,6 @@ async def get_feeder_load_analysis_report_status(self, report_id: str, full_spec FeederLoadAnalysisSpecFields.output ), ), - operation_name="getFeederLoadAnalysisReportStatus", ) @async_func @@ -206,9 +217,8 @@ async def upload_study(self, study: StudyInput | list[StudyInput]): Uploads a new study to the Evolve App Server :param study: An instance of a data class representing a new study """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.add_studies(study if isinstance(study, list) else [study]), - operation_name="addStudy", ) @async_func @@ -219,9 +229,8 @@ async def run_ingestor(self, run_config: list[IngestorConfigInput]): :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.execute_ingestor(run_config=run_config), - operation_name="executeIngestor", ) @async_func @@ -232,7 +241,7 @@ async def get_ingestor_run(self, ingestor_run_id: int): :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). 
""" - return await self._gql_client.query( + return await self.query( Query.get_ingestor_run(ingestor_run_id).fields( IngestionRunFields.id, IngestionRunFields.container_runtime_type, @@ -243,7 +252,6 @@ async def get_ingestor_run(self, ingestor_run_id: int): IngestionRunFields.status_last_updated_at, IngestionRunFields.completed_at, ), - operation_name="getIngestorRun", ) @async_func @@ -260,7 +268,7 @@ async def get_ingestor_run_list( :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. """ - return await self._gql_client.query( + return await self.query( Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( IngestionRunFields.id, IngestionRunFields.container_runtime_type, @@ -271,7 +279,6 @@ async def get_ingestor_run_list( IngestionRunFields.status_last_updated_at, IngestionRunFields.completed_at, ), - operation_name="listIngestorRuns", ) @async_func @@ -304,14 +311,13 @@ async def run_hosting_capacity_calibration( if generator_config.model: generator_config.model.transformer_tap_settings = transformer_tap_settings - return await self._gql_client.mutation( + return await self.mutation( Mutation.run_calibration( calibration_name=calibration_name, calibration_time_local=local_calibration_time, feeders=feeders, generator_config=generator_config, ), - operation_name="runCalibration", ) @async_func @@ -322,7 +328,7 @@ async def get_hosting_capacity_calibration_run(self, id: str): :param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return await self._gql_client.query( + return await self.query( Query.get_calibration_run(id).fields( HcCalibrationFields.id, HcCalibrationFields.name, @@ -335,7 +341,6 @@ async def get_hosting_capacity_calibration_run(self, id: str): 
HcCalibrationFields.feeders, HcCalibrationFields.calibration_work_package_config, ), - operation_name="getCalibrationRun", ) @async_func @@ -345,9 +350,8 @@ async def get_hosting_capacity_calibration_sets(self): Retrieve a list of all completed calibration runs initiated through Evolve App Server :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return await self._gql_client.query( + return await self.query( Query.get_calibration_sets(), - operation_name="getCalibrationSets", ) @async_func @@ -365,7 +369,7 @@ async def get_transformer_tap_settings( :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - return await self._gql_client.query( + return await self.query( Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, @@ -379,7 +383,6 @@ async def get_transformer_tap_settings( GqlTxTapRecordFields.control_enabled, GqlTxTapRecordFields.step_voltage_increment, ), - operation_name="getTransformerTapSettings", ) @async_func @@ -390,9 +393,8 @@ async def run_opendss_export(self, config: OpenDssModelInput): :param config: The OpenDssConfig for running the export :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export """ - return await self._gql_client.mutation( + return await self.mutation( Mutation.create_open_dss_model(config), - operation_name="createOpenDssModel", ) @async_func @@ -411,7 +413,7 @@ async def get_paged_opendss_models( :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - return await self._gql_client.query( + return await self.query( Query.paged_open_dss_models( limit=limit, offset=offset, @@ -431,7 
+433,6 @@ async def get_paged_opendss_models( OpenDssModelFields.generation_spec ), ), - operation_name="pagedOpenDssModels", ) @async_func @@ -442,9 +443,9 @@ async def get_opendss_model_download_url(self, run_id: int): :param run_id: The opendss export run ID :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url """ - response = (await self._gql_client.http_client.get( + response = (await self.http_client.get( f"{self._base_url}/api/opendss-model/{run_id}", - headers=self._gql_client.headers, + headers=self.headers, follow_redirects=False )) if response.status_code == HTTPStatus.FOUND: diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 999ff2d..84a4613 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -78,7 +78,7 @@ def test_get_request_headers_adds_access_token_in_auth_header(): access_token=mock_access_token, ) - headers = eas_client._gql_client.http_client.headers + headers = eas_client.http_client.headers assert headers["authorization"] == f"Bearer {mock_access_token}" From 572e4b14fd3d7c00c2f33dc3e6e6dc9546bf7a19 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Fri, 20 Mar 2026 11:38:24 +1100 Subject: [PATCH 13/32] convenience method for all fields, opt in for legacy methods, other stuff too Signed-off-by: Max Chesterfield --- README.md | 138 ++++++++-------- src/zepben/eas/__init__.py | 2 + src/zepben/eas/client/decorators.py | 10 +- src/zepben/eas/client/eas_client.py | 247 ++++++++++++++++------------ test/test_eas_client.py | 72 +++++--- test/test_patched_client.py | 18 +- 6 files changed, 284 insertions(+), 203 deletions(-) diff --git a/README.md b/README.md index b6a04d2..f1ca418 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ # Evolve App Server Python Client # -This library provides a wrapper to the Evolve App Server's API, allowing users of the evolve SDK to authenticate with +This library provides a wrapper to the Evolve App Server's API, 
allowing users of the Evolve SDK to authenticate with the Evolve App Server and upload studies. # Usage # ```python from geojson import FeatureCollection -from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType +from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType, Mutation eas_client = EasClient( host="", @@ -16,40 +16,44 @@ eas_client = EasClient( asynchronous=False, ) -eas_client.upload_study( - StudyInput( - name="", - description="", - tags=["", ""], - results=[ - StudyResultInput( - name="", - geoJsonOverlay=GeoJsonOverlayInput( - data=FeatureCollection(...), - styles=["style1"] - ), - sections=[ - ResultSectionInput( - type=SectionType.TABLE, - name="
", - description="
", - columns=[ - {"key": "", "name": ""}, - {"key": "", "name": ""}, - ], - data=[ - {"": "", "": ""}, - {"": "", "": ""} +eas_client.mutation( + Mutation.add_studies( + [ + StudyInput( + name="", + description="", + tags=["", ""], + results=[ + StudyResultInput( + name="", + geoJsonOverlay=GeoJsonOverlayInput( + data=FeatureCollection(...), + styles=["style1"] + ), + sections=[ + ResultSectionInput( + type=SectionType.TABLE, + name="
", + description="
", + columns=[ + {"key": "", "name": ""}, + {"key": "", "name": ""}, + ], + data=[ + {"": "", "": ""}, + {"": "", "": ""} + ] + ) ] ) + ], + styles=[ + { + "id": "style1", + # other Mapbox GL JS style properties + } ] ) - ], - styles=[ - { - "id": "style1", - # other Mapbox GL JS style properties - } ] ) ) @@ -63,7 +67,7 @@ The EasClient can operate in async mode if specified, like so: ```python from aiohttp import ClientSession from geojson import FeatureCollection -from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType +from zepben.eas import EasClient, StudyInput, StudyResultInput, GeoJsonOverlayInput, ResultSectionInput, SectionType, Mutation async def upload(): @@ -74,45 +78,49 @@ async def upload(): asynchronous=True, # returns all methods as plain async methods ) - await eas_client.upload_study( - StudyInput( - name="", - description="", - tags=["", ""], - results=[ - StudyResultInput( - name="", - geoJsonOverlay=GeoJsonOverlayInput( - data=FeatureCollection(...), - styles=["style1"] - ), - sections=[ - ResultSectionInput( - type=SectionType.TABLE, - name="
", - description="
", - columns=[ - {"key": "", "name": ""}, - {"key": "", "name": ""}, - ], - data=[ - {"": "", "": ""}, - {"": "", "": ""} - ] + await eas_client.mutation( + Mutation.add_studies( + [ + StudyInput( + name="", + description="", + tags=["", ""], + results=[ + StudyResultInput( + name="", + geoJsonOverlay=GeoJsonOverlayInput( + data=FeatureCollection(...), + styles=["style1"] + ), + sections=[ + ResultSectionInput( + type=SectionType.TABLE, + name="
", + description="
", + columns=[ + {"key": "", "name": ""}, + {"key": "", "name": ""}, + ], + data=[ + {"": "", "": ""}, + {"": "", "": ""} + ] + ) + ] ) + ], + styles=[ + { + "id": "style1", + # other Mapbox GL JS style properties + } ] ) - ], - styles=[ - { - "id": "style1", - # other Mapbox GL JS style properties - } ] ) ) - await eas_client.aclose() + await eas_client.close() ``` # Development # diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index b9445f2..b57863e 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -9,3 +9,5 @@ from zepben.eas.client.enums import * from zepben.eas.lib.generated_graphql_client import * +from zepben.eas.lib.generated_graphql_client.custom_mutations import * +from zepben.eas.lib.generated_graphql_client.custom_queries import * diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index 318690c..c2e92b0 100644 --- a/src/zepben/eas/client/decorators.py +++ b/src/zepben/eas/client/decorators.py @@ -4,7 +4,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-__all__ = ['catch_warnings', 'async_func'] +__all__ = ['catch_warnings', 'async_func', 'opt_in'] import functools import warnings @@ -29,3 +29,11 @@ def wrapper(self, *args, **kwargs): return func(self, *args, **kwargs) return get_event_loop().run_until_complete(func(self, *args, **kwargs)) return wrapper + +def opt_in(func: Callable) -> Callable: + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + if self._opt_in_legacy: + return func(self, *args, **kwargs) + raise AttributeError(f"'{func.__qualname__}' is a legacy function and must be explicitly opted into.") + return wrapper diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index b524eed..b739dfa 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -6,28 +6,51 @@ __all__ = ["EasClient"] +import inspect import ssl -from asyncio import get_event_loop from datetime import datetime from http import HTTPStatus -from typing import Any +from types import MethodType +from typing import Any, Generator import httpx from graphql import OperationType +from typing_extensions import deprecated -from zepben.eas.client.decorators import async_func, catch_warnings +from zepben.eas.client.decorators import async_func, catch_warnings, opt_in from zepben.eas.client.patched_generated_client import PatchedClient as Client from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, \ HcModelConfigInput, OpenDssModelInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput from zepben.eas.lib.generated_graphql_client.base_operation import GraphQLField -from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisSpecFields, \ - FeederLoadAnalysisReportFields, IngestionRunFields, HcCalibrationFields, GqlTxTapRecordFields, \ - OpenDssModelPageFields, 
OpenDssModelFields +from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisReportFields, IngestionRunFields, \ + HcCalibrationFields, GqlTxTapRecordFields, OpenDssModelPageFields, OpenDssModelFields from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation from zepben.eas.lib.generated_graphql_client.custom_queries import Query +def graph_ql_field_all_fields(cls) -> list[GraphQLField]: + """ + Helper function to list all ``GraphQLField``s that a given class returns + + :param cls: class to check + :return: list of GraphQLField's, ready to pass to ``cls().fields()`` + """ + def _inner() -> Generator[GraphQLField | MethodType, None, None]: + for k in dir(cls): + if k.startswith("_"): + continue + if k == "all_fields": + continue + v = getattr(cls, k) + if isinstance(v, GraphQLField): + yield v + elif inspect.ismethod(v): + yield v().fields(*v().all_fields()) + return list(_inner()) + +GraphQLField.all_fields = classmethod(graph_ql_field_all_fields) + class EasClient(Client): """ @@ -44,6 +67,7 @@ def __init__( verify_certificate: bool = True, ca_filename: str | None = None, asynchronous: bool = False, + enable_legacy_methods: bool = False, ): """ Construct a client for the Evolve App Server. If the server is HTTPS, authentication may be configured. @@ -64,8 +88,11 @@ def __init__( :param verify_certificate: Set this to "False" to disable certificate verification. :param ca_filename: Path to CA file to use for verification. (Optional - by default will use system certs) :param asynchronous: all functions will be returned as ``Coroutine``s if True, or ran in an existing event loop if False + :param enable_legacy_methods: enable legacy methods support. 
""" + self._opt_in_legacy = enable_legacy_methods self._asynchronous = asynchronous + self._protocol = protocol self._host = host self._port = port @@ -87,17 +114,16 @@ def __init__( http_client=http_client, ) - - def close(self): - return get_event_loop().run_until_complete(self.aclose()) - - async def aclose(self): + @async_func + async def close(self): await self.http_client.aclose() + @async_func async def query(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: """Execute a query against the Evolve App Server.""" return await super().query(*fields, operation_name=operation_name) + @async_func async def mutation(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: """Execute a mutation against the Evolve App Server.""" return await super().mutation(*fields, operation_name=operation_name) @@ -111,40 +137,66 @@ async def execute_custom_operation(self, *fields: GraphQLField, operation_type: @async_func @catch_warnings - async def get_work_package_cost_estimation(self, work_package: WorkPackageInput): + async def get_opendss_model_download_url(self, run_id: int): + """ + Retrieve a download url for the specified opendss export run id + :param run_id: The opendss export run ID + :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url + """ + response = (await self.http_client.get( + f"{self._base_url}/api/opendss-model/{run_id}", + headers=self.headers, + follow_redirects=False + )) + if response.status_code == HTTPStatus.FOUND: + return response.headers["Location"] + elif not response.ok: + response.raise_for_status() + + ##################################################### + # Legacy Methods, to be removed in a future release # + ##################################################### + + @deprecated("Use query()/mutation() methods directly instead.") + @catch_warnings + @opt_in + def get_work_package_cost_estimation(self, work_package: WorkPackageInput): """ 
Send request to hosting capacity service to get an estimate cost of supplied work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return await self.query( + return self.query( Query.get_work_package_cost_estimation(work_package), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def run_hosting_capacity_work_package(self, work_package: WorkPackageInput, work_package_name: str): + @opt_in + def run_hosting_capacity_work_package(self, work_package: WorkPackageInput, work_package_name: str): """ Send request to hosting capacity service to run work package :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run + :param work_package_name: The name of the work package to run. :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return await self.mutation( + return self.mutation( Mutation.run_work_package(work_package, work_package_name=work_package_name), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def cancel_hosting_capacity_work_package(self, work_package_id: str): + @opt_in + def cancel_hosting_capacity_work_package(self, work_package_id: str): """ Send request to hosting capacity service to cancel a running work package :param work_package_id: The id of the running work package to cancel :return: The HTTP response received from the Evolve App Server after attempting to cancel work package """ - return await self.mutation( + return self.mutation( Mutation.cancel_work_package(work_package_id=work_package_id), ) @@ -154,28 +206,29 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this inf :return: The HTTP response 
received from the Evolve App Server after requesting work packages progress info """ - return get_event_loop().run_until_complete( - self.query( + return self.query( Query.get_active_work_packages(), ) - ) - @async_func + + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): + @opt_in + def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput): """ Send request to evolve app server to run a feeder load analysis study :param feeder_load_analysis_input:: An instance of the `FeederLoadAnalysisConfig` data class representing the configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return await self.mutation( + return self.mutation( Mutation.run_feeder_load_analysis(feeder_load_analysis_input), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): + @opt_in + def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool = False): """ Send request to evolve app server to retrieve a feeder load analysis report status @@ -183,92 +236,72 @@ async def get_feeder_load_analysis_report_status(self, report_id: str, full_spec :param full_spec: If true the response will include the request sent to generate the report :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ - return await self.query( + return self.query( Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( - FeederLoadAnalysisReportFields.id, - FeederLoadAnalysisReportFields.name, - FeederLoadAnalysisReportFields.created_at, - FeederLoadAnalysisReportFields.created_by, - FeederLoadAnalysisReportFields.completed_at, - 
FeederLoadAnalysisReportFields.state, - FeederLoadAnalysisReportFields.errors, - FeederLoadAnalysisReportFields.generation_spec().fields( - FeederLoadAnalysisSpecFields.feeders, - FeederLoadAnalysisSpecFields.substations, - FeederLoadAnalysisSpecFields.sub_geographical_regions, - FeederLoadAnalysisSpecFields.geographical_regions, - FeederLoadAnalysisSpecFields.start_date, - FeederLoadAnalysisSpecFields.end_date, - FeederLoadAnalysisSpecFields.fetch_lv_network, - FeederLoadAnalysisSpecFields.process_feeder_loads, - FeederLoadAnalysisSpecFields.process_coincident_loads, - FeederLoadAnalysisSpecFields.produce_basic_report, - FeederLoadAnalysisSpecFields.produce_conductor_report, - FeederLoadAnalysisSpecFields.aggregate_at_feeder_level, - FeederLoadAnalysisSpecFields.output - ), + *FeederLoadAnalysisReportFields.all_fields() ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def upload_study(self, study: StudyInput | list[StudyInput]): + @opt_in + def upload_study(self, study: StudyInput | list[StudyInput]): """ Uploads a new study to the Evolve App Server + :param study: An instance of a data class representing a new study """ - return await self.mutation( + return self.mutation( Mutation.add_studies(study if isinstance(study, list) else [study]), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def run_ingestor(self, run_config: list[IngestorConfigInput]): + @opt_in + def run_ingestor(self, run_config: list[IngestorConfigInput]): """ Send request to perform an ingestor run + :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ - return await self.mutation( + return self.mutation( Mutation.execute_ingestor(run_config=run_config), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def 
get_ingestor_run(self, ingestor_run_id: int): + @opt_in + def get_ingestor_run(self, ingestor_run_id: int): """ Send request to retrieve the record of a particular ingestor run. + :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ - return await self.query( + return self.query( Query.get_ingestor_run(ingestor_run_id).fields( - IngestionRunFields.id, - IngestionRunFields.container_runtime_type, - IngestionRunFields.payload, - IngestionRunFields.token, - IngestionRunFields.status, - IngestionRunFields.started_at, - IngestionRunFields.status_last_updated_at, - IngestionRunFields.completed_at, + *IngestionRunFields.all_fields() ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_ingestor_run_list( + @opt_in + def get_ingestor_run_list( self, query_filter: IngestorRunsFilterInput | None = None, query_sort: IngestorRunsSortCriteriaInput | None = None ): """ Send request to retrieve a list of ingestor run records matching the provided filter parameters. + :param query_filter: An `IngestorRunsFilterInput` object. Only records matching the provided values will be returned. If not supplied all records will be returned. (Optional) :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. 
""" - return await self.query( + return self.query( Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( IngestionRunFields.id, IngestionRunFields.container_runtime_type, @@ -281,9 +314,10 @@ async def get_ingestor_run_list( ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def run_hosting_capacity_calibration( + @opt_in + def run_hosting_capacity_calibration( self, calibration_name: str, local_calibration_time: datetime, @@ -292,6 +326,7 @@ async def run_hosting_capacity_calibration( generator_config: HcGeneratorConfigInput | None = None): """ Send request to run hosting capacity calibration + :param calibration_name: A string representation of the calibration name :param local_calibration_time: A datetime representation of the calibration time, in the timezone of your pqv data ("model time"). :param feeders: A list of feeder ID's to run the calibration over. If not supplied then the calibration is run over all feeders in the network. 
@@ -311,7 +346,7 @@ async def run_hosting_capacity_calibration( if generator_config.model: generator_config.model.transformer_tap_settings = transformer_tap_settings - return await self.mutation( + return self.mutation( Mutation.run_calibration( calibration_name=calibration_name, calibration_time_local=local_calibration_time, @@ -320,15 +355,17 @@ async def run_hosting_capacity_calibration( ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_hosting_capacity_calibration_run(self, id: str): + @opt_in + def get_hosting_capacity_calibration_run(self, id: str): """ Retrieve information of a hosting capacity calibration run + :param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return await self.query( + return self.query( Query.get_calibration_run(id).fields( HcCalibrationFields.id, HcCalibrationFields.name, @@ -343,20 +380,23 @@ async def get_hosting_capacity_calibration_run(self, id: str): ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_hosting_capacity_calibration_sets(self): + @opt_in + def get_hosting_capacity_calibration_sets(self): """ Retrieve a list of all completed calibration runs initiated through Evolve App Server + :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return await self.query( + return self.query( Query.get_calibration_sets(), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_transformer_tap_settings( + @opt_in + def get_transformer_tap_settings( self, calibration_name: str, feeder: str | None = None, @@ -364,12 +404,13 @@ async def get_transformer_tap_settings( ): """ Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database + :param 
calibration_name: The (user supplied)name of the calibration run to retrieve transformer tap settings from :param feeder: An optional filter to apply to the returned list of transformer tap settings :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - return await self.query( + return self.query( Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, @@ -385,21 +426,24 @@ async def get_transformer_tap_settings( ), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def run_opendss_export(self, config: OpenDssModelInput): + @opt_in + def run_opendss_export(self, config: OpenDssModelInput): """ Send request to run an opendss export + :param config: The OpenDssConfig for running the export :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export """ - return await self.mutation( + return self.mutation( Mutation.create_open_dss_model(config), ) - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_paged_opendss_models( + @opt_in + def get_paged_opendss_models( self, limit: int | None = None, offset: int | None = None, @@ -407,13 +451,14 @@ async def get_paged_opendss_models( query_sort: GetOpenDssModelsSortCriteriaInput | None = None): """ Retrieve a paginated opendss export run information + :param limit: The number of opendss export runs to retrieve :param offset: The number of opendss export runs to skip :param query_filter: The filter to apply to the query :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - return await self.query( + return self.query( 
Query.paged_open_dss_models( limit=limit, offset=offset, @@ -435,29 +480,13 @@ async def get_paged_opendss_models( ), ) - @async_func - @catch_warnings - async def get_opendss_model_download_url(self, run_id: int): - """ - Retrieve a download url for the specified opendss export run id - :param run_id: The opendss export run ID - :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url - """ - response = (await self.http_client.get( - f"{self._base_url}/api/opendss-model/{run_id}", - headers=self.headers, - follow_redirects=False - )) - if response.status_code == HTTPStatus.FOUND: - return response.headers["Location"] - elif not response.ok: - response.raise_for_status() - - @async_func + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings - async def get_opendss_model(self, model_id: int): + @opt_in + def get_opendss_model(self, model_id: int): """ Retrieve information of a OpenDss model export + :param model_id: The OpenDss model export ID :return: The HTTP response received from the Evolve App Server after requesting the openDss model info """ @@ -466,7 +495,7 @@ async def get_opendss_model(self, model_id: int): page_size = 20 while True: - response = await self.get_paged_opendss_models(page_size, offset) + response = self.get_paged_opendss_models(page_size, offset) total_count = int(response["data"]["pagedOpenDssModels"]["totalCount"]) page_count = len(response["data"]["pagedOpenDssModels"]["models"]) for model in response["data"]["pagedOpenDssModels"]["models"]: diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 84a4613..a9e6f61 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -108,7 +108,8 @@ def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServ eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) 
httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -193,7 +194,8 @@ def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPSer eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"data": {"runWorkPackage": "workPackageId"}}) @@ -272,7 +274,8 @@ def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTP eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -306,7 +309,8 @@ def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -340,7 +344,8 @@ def test_upload_study_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) @@ -390,7 +395,8 @@ def test_run_hosting_capacity_calibration_no_verify_success(httpserver: HTTPServ eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -434,7 +440,8 @@ def test_get_hosting_capacity_calibration_run_no_verify_success(httpserver: HTTP eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) 
httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -485,7 +492,8 @@ def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_succes eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -507,7 +515,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -549,7 +558,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -591,7 +601,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -610,7 +621,8 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -640,7 +652,8 @@ def test_get_hosting_capacity_calibration_sets_no_verify_success(httpserver: HTT eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) 
httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -955,7 +968,8 @@ def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(run_opendss_export_request_handler) @@ -1024,7 +1038,8 @@ def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1069,7 +1084,8 @@ def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( @@ -1119,7 +1135,8 @@ def test_run_ingestor_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1134,9 +1151,9 @@ def get_ingestor_run_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == ("query getIngestorRun($id_0: Int!) { getIngestorRun(id: $id_0) { id " - "containerRuntimeType payload token status startedAt statusLastUpdatedAt " - "completedAt } }") + assert query == ("query getIngestorRun($id_0: Int!) 
{ getIngestorRun(id: $id_0) { completedAt " + "containerRuntimeType id payload startedAt status statusLastUpdatedAt token } " + "}") assert actual_body['variables'] == {"id_0": 1} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1146,7 +1163,8 @@ def test_get_ingestor_run_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(get_ingestor_run_request_handler) @@ -1171,7 +1189,8 @@ def test_get_ingestor_run_list_empty_filter_no_verify_success(httpserver: HTTPSe eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(get_ingestor_run_list_request_empty_handler) @@ -1212,7 +1231,8 @@ def test_get_ingestor_run_list_all_filters_no_verify_success(httpserver: HTTPSer eas_client = EasClient( host=LOCALHOST, port=httpserver.port, - verify_certificate=False + verify_certificate=False, + enable_legacy_methods=True, ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1339,7 +1359,8 @@ def _invalid_ca(port): host=LOCALHOST, port=port, verify_certificate=True, - ca_filename=ca_filename + ca_filename=ca_filename, + enable_legacy_methods=True, ) @@ -1349,5 +1370,6 @@ def _valid_ca(port, ca: trustme.CA): host=LOCALHOST, port=port, verify_certificate=True, - ca_filename=ca_filename + ca_filename=ca_filename, + enable_legacy_methods=True, ) diff --git a/test/test_patched_client.py b/test/test_patched_client.py index b10f466..fd82fdd 100644 --- a/test/test_patched_client.py +++ b/test/test_patched_client.py @@ -4,8 +4,9 @@ # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -from zepben.eas import EasClient +from zepben.eas import EasClient, FeederLoadAnalysisReportFields from zepben.eas.client.patched_generated_client import PatchedClient +from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisSpecFields class MockResponse: @@ -22,10 +23,21 @@ def is_success(self): def test_patched_client_used_in_eas_client(): client = EasClient(host="test_host", port=9876) - assert isinstance(client._gql_client, PatchedClient) + assert isinstance(client, PatchedClient) def test_patched_client_overrides_get_data_to_return_the_whole_json_response(): client = EasClient(host="test_host", port=9876) - assert client._gql_client.get_data(MockResponse()) == {'json': 'probably'} + assert client.get_data(MockResponse()) == {'json': 'probably'} + + +def test_all_fields(): + _all_fields = FeederLoadAnalysisReportFields.all_fields() + assert FeederLoadAnalysisReportFields.completed_at in _all_fields + + for f in _all_fields: + if isinstance(f, FeederLoadAnalysisSpecFields): + break + else: + assert False \ No newline at end of file From 2c5f7e1fe13d07427451dc73df9f8f70a77712bb Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Fri, 20 Mar 2026 20:44:32 +1100 Subject: [PATCH 14/32] add docs, update pyproject Signed-off-by: Max Chesterfield --- README.md | 42 ++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 8 ++------ 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f1ca418..abc895b 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,48 @@ async def upload(): await eas_client.close() ``` +# I'm used to the old client, what do i do? # + +## Migrating existing code ## + +Most of the objects passed into requests are similar. +The new EasClient is fully type hinted and self documenting. + +For example. 
+ +```python +from zepben.eas import EasClient, WorkPackageInput, HcExecutorConfigInput, FeederConfigsInput, FeederConfigInput + +client = EasClient(host='host', port=1234) +client.get_work_package_cost_estimation( + WorkPackageInput( + feederConfigs=FeederConfigsInput( + configs=[ + FeederConfigInput( + feeder='myFeeder', + years=[2024, 2025], + scenarios=['scenario1'] + ) + ] + ) + ) +) +``` + +Hovering over any kwarg or looking at any class definition will show all possible parameters, and their expected types. + +## Enabling legacy convenience methods ## + +Legacy convenience methods can be enabled by passing `enable_legacy_methods` to `__init__` of `EasClient`. eg: + +```python +from zepben.eas import EasClient + +client = EasClient(enable_legacy_methods=True) +``` + +This will enable all `deprecated` and `opt_in` methods on the class, they are disabled by default. + # Development # To regenerate the graphql client you will need to install `zepben.eas` with `eas-codegen` optional dependencies, then run: diff --git a/pyproject.toml b/pyproject.toml index 36aa829..2557f00 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# Copyright 2025 Zeppelin Bend Pty Ltd +# Copyright 2026 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this @@ -14,7 +14,7 @@ build-backend = "setuptools.build_meta" [project] name = "zepben.eas" -version = "0.30.0b1" +version = "1.0.0b1" description = "Python SDK for interacting with the Evolve App Server" readme = {file = "README.md", content-type = "text/markdown"} license = "MPL-2.0" @@ -25,10 +25,6 @@ authors = [ ] dependencies = [ "geojson==2.5.0", - "requests<3.0.0,>=2.26.0", - "urllib3==2.5.0", - "zepben.ewb==1.1.0", - "aiohttp[speedups]==3.9.0", ] classifiers = [ "Programming Language :: Python :: 3", From f1eed7c1a57d04764bdaba702cd29691e42b4e15 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 24 Mar 2026 10:51:17 +1100 Subject: [PATCH 15/32] switch to actual git release version for ariadne-codegen Signed-off-by: Max Chesterfield --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2557f00..da65733 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,8 @@ test = [ "trustme==0.9.0" ] eas-codegen = [ - "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. +# "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. 
+ "ariadne-codegen==0.18.0" ] [tool.setuptools.packages.find] From bd0c49d4c9030e550d73dc56c08dd1a5ccab16e3 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Thu, 26 Mar 2026 18:51:50 +1100 Subject: [PATCH 16/32] simplify code, fix imports, create stub to show lazy added method Signed-off-by: Max Chesterfield --- pyproject.toml | 6 +- src/zepben/eas/__init__.py | 1 + src/zepben/eas/client/decorators.py | 11 +- src/zepben/eas/client/eas_client.py | 110 +++++++++++++----- .../eas/client/patched_generated_client.py | 44 ------- .../base_operation.pyi | 23 ++++ .../custom_mutations.py | 2 +- .../custom_queries.py | 2 +- test/test_eas_client.py | 4 +- test/test_integration_testing.py | 33 +++++- test/test_patched_client.py | 6 +- 11 files changed, 156 insertions(+), 86 deletions(-) delete mode 100644 src/zepben/eas/client/patched_generated_client.py create mode 100644 src/zepben/eas/lib/generated_graphql_client/base_operation.pyi diff --git a/pyproject.toml b/pyproject.toml index da65733..2162967 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ [build-system] requires = [ "setuptools", - "wheel", "build>=1.2.0" ] build-backend = "setuptools.build_meta" @@ -25,6 +24,10 @@ authors = [ ] dependencies = [ "geojson==2.5.0", + "httpx", + "graphql-core==3.2.8", + "pydantic", + "pydantic_core", ] classifiers = [ "Programming Language :: Python :: 3", @@ -47,7 +50,6 @@ test = [ "trustme==0.9.0" ] eas-codegen = [ -# "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. 
"ariadne-codegen==0.18.0" ] diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index b57863e..a6b3ea5 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -11,3 +11,4 @@ from zepben.eas.lib.generated_graphql_client import * from zepben.eas.lib.generated_graphql_client.custom_mutations import * from zepben.eas.lib.generated_graphql_client.custom_queries import * +from zepben.eas.lib.generated_graphql_client.custom_fields import * diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index c2e92b0..c705f1e 100644 --- a/src/zepben/eas/client/decorators.py +++ b/src/zepben/eas/client/decorators.py @@ -4,7 +4,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -__all__ = ['catch_warnings', 'async_func', 'opt_in'] +__all__ = ['catch_warnings', 'async_func', 'opt_in', "add_method_to"] import functools import warnings @@ -30,6 +30,15 @@ def wrapper(self, *args, **kwargs): return get_event_loop().run_until_complete(func(self, *args, **kwargs)) return wrapper +def add_method_to(class_to_extend: type) -> Callable: + """ + + :rtype: Callable + """ + def decorator(func: Callable): + setattr(class_to_extend, func.__name__, func) + return decorator + def opt_in(func: Callable) -> Callable: @functools.wraps(func) def wrapper(self, *args, **kwargs): diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index b739dfa..111e0f2 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -4,52 +4,64 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+from __future__ import annotations + __all__ = ["EasClient"] +import sys + + +if sys.version_info < (3, 13): + from typing_extensions import deprecated +else: + from warnings import deprecated + import inspect import ssl from datetime import datetime from http import HTTPStatus from types import MethodType -from typing import Any, Generator +from typing import Any, Generator, cast import httpx from graphql import OperationType -from typing_extensions import deprecated -from zepben.eas.client.decorators import async_func, catch_warnings, opt_in -from zepben.eas.client.patched_generated_client import PatchedClient as Client +from zepben.eas.client.decorators import async_func, catch_warnings, opt_in, add_method_to -from zepben.eas.lib.generated_graphql_client import WorkPackageInput, FeederLoadAnalysisInput, StudyInput, \ - IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, \ - HcModelConfigInput, OpenDssModelInput, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput +from zepben.eas.lib.generated_graphql_client import GraphQLClientHttpError, GraphQLClientInvalidResponseError, \ + GraphQLClientGraphQLMultiError, Client, WorkPackageInput, FeederLoadAnalysisInput, StudyInput, IngestorConfigInput, \ + IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, HcModelConfigInput, OpenDssModelInput, \ + GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput from zepben.eas.lib.generated_graphql_client.base_operation import GraphQLField from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisReportFields, IngestionRunFields, \ HcCalibrationFields, GqlTxTapRecordFields, OpenDssModelPageFields, OpenDssModelFields from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation from zepben.eas.lib.generated_graphql_client.custom_queries import Query -def graph_ql_field_all_fields(cls) -> list[GraphQLField]: + +# noinspection 
PyDecorator,PyNestedDecorators +@add_method_to(GraphQLField) +@classmethod +def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: """ - Helper function to list all ``GraphQLField``s that a given class returns + returns a generator over all ``GraphQLField``s that a given class returns :param cls: class to check - :return: list of GraphQLField's, ready to pass to ``cls().fields()`` + :return: generator over all GraphQLField's in a given class """ - def _inner() -> Generator[GraphQLField | MethodType, None, None]: - for k in dir(cls): - if k.startswith("_"): - continue - if k == "all_fields": - continue - v = getattr(cls, k) - if isinstance(v, GraphQLField): - yield v - elif inspect.ismethod(v): - yield v().fields(*v().all_fields()) - return list(_inner()) - -GraphQLField.all_fields = classmethod(graph_ql_field_all_fields) + for k in dir(cls): + # we only want "public" attrs. + if k.startswith("_"): + continue + # obviously we don't want to return ourselves. + if k == "all_fields": + continue + + v = getattr(cls, k) + if isinstance(v, GraphQLField): + yield v + elif inspect.ismethod(v): + yield v().fields(*v().all_fields()) class EasClient(Client): @@ -132,7 +144,7 @@ async def execute_custom_operation(self, *fields: GraphQLField, operation_type: return await super().execute_custom_operation( *fields, operation_type=operation_type, - operation_name=operation_name or '-'.join(f._field_name for f in fields) + operation_name=operation_name or str('-'.join(f._field_name for f in fields)) ) @async_func @@ -153,10 +165,50 @@ async def get_opendss_model_download_url(self, run_id: int): elif not response.ok: response.raise_for_status() + # This method replaces the implementation in zepben.eas.lib.generated_graphql_client.client.Client to + def get_data(self, response: httpx.Response) -> dict[str, Any]: + if not response.is_success: + raise GraphQLClientHttpError( + status_code=response.status_code, response=response + ) + + # ::Parent Implementation:: + # + 
# if (not isinstance(response_json, dict)) or ( + # "data" not in response_json and "errors" not in response_json + # ): + # raise GraphQLClientInvalidResponseError(response=response) + # + # data = response_json.get("data") + # errors = response_json.get("errors") + + # ::Start New Implementation:: + try: + response_json = response.json() + except ValueError as exc: + raise GraphQLClientInvalidResponseError(response=response) from exc + + try: + errors = response_json.get("errors") + except AttributeError: + errors = None + # ::End New Implementation:: + + if errors: + raise GraphQLClientGraphQLMultiError.from_errors_dicts( + errors_dicts=errors, data=response_json + ) + + return cast(dict[str, Any], response_json) + ##################################################### # Legacy Methods, to be removed in a future release # ##################################################### + @deprecated("use self.close() instead") + async def aclose(self): + await self.close() + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings @opt_in @@ -200,6 +252,9 @@ def cancel_hosting_capacity_work_package(self, work_package_id: str): Mutation.cancel_work_package(work_package_id=work_package_id), ) + @deprecated + @catch_warnings + @opt_in def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this info not returned by get_work_package_by_id ? 
""" Retrieve running work packages progress information from hosting capacity service @@ -207,8 +262,8 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this inf :return: The HTTP response received from the Evolve App Server after requesting work packages progress info """ return self.query( - Query.get_active_work_packages(), - ) + Query.get_active_work_packages(), + ) @deprecated("Use query()/mutation() methods directly instead.") @@ -495,6 +550,7 @@ def get_opendss_model(self, model_id: int): page_size = 20 while True: + # noinspection PyDeprecation response = self.get_paged_opendss_models(page_size, offset) total_count = int(response["data"]["pagedOpenDssModels"]["totalCount"]) page_count = len(response["data"]["pagedOpenDssModels"]["models"]) diff --git a/src/zepben/eas/client/patched_generated_client.py b/src/zepben/eas/client/patched_generated_client.py deleted file mode 100644 index bb11e0d..0000000 --- a/src/zepben/eas/client/patched_generated_client.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - -__all__ = ['PatchedClient'] - -from typing import Any, cast - -import httpx -from ariadne_codegen.client_generators.dependencies.exceptions import GraphQLClientHttpError, \ - GraphQLClientInvalidResponseError, GraphQLClientGraphQLMultiError - -from zepben.eas.lib.generated_graphql_client import Client - - -class PatchedClient(Client): - """ - Used to override autogenerated client code, rather than having to maintain a diff to be applied after generating. 
- """ - - def get_data(self, response: httpx.Response) -> dict[str, Any]: - if not response.is_success: - raise GraphQLClientHttpError( - status_code=response.status_code, response=response - ) - - try: - response_json = response.json() - except ValueError as exc: - raise GraphQLClientInvalidResponseError(response=response) from exc - - try: - errors = response_json.get("errors") - except AttributeError: - errors = None - - if errors: - raise GraphQLClientGraphQLMultiError.from_errors_dicts( - errors_dicts=errors, data=response_json - ) - - return cast(dict[str, Any], response_json) diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi new file mode 100644 index 0000000..fca940a --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi @@ -0,0 +1,23 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +from _typeshed import Incomplete +from types import MethodType +from typing import Generator, Any + +def __getattr__(name) -> Incomplete: ... + +class GraphQLField: + def __getattr__(self, name: str) -> Incomplete: ... 
+ + @classmethod + def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: + """ + Returns a generator over all ``GraphQLField``s that a given class returns + + :param cls: class to check + :return: generator over all GraphQLField's in a given class + """ diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py index f08802d..9566591 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -19,7 +19,7 @@ StudyInput, WorkPackageInput, ) -from .enums import SincalFileType, VariantFileType +from . import SincalFileType, VariantFileType class Mutation: diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index f8fea64..7623f4c 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -65,7 +65,7 @@ ProcessedDiffSortCriteriaInput, WorkPackageInput, ) -from .enums import HostingCapacityFileType, WorkflowStatus, ContainerType, SincalFileType, VariantFileType +from . 
import HostingCapacityFileType, ContainerType, SincalFileType, VariantFileType, WorkflowStatus class Query: diff --git a/test/test_eas_client.py b/test/test_eas_client.py index a9e6f61..49553c5 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -16,9 +16,7 @@ from pytest_httpserver import HTTPServer from werkzeug import Response -from zepben.eas import EasClient -from zepben.eas.client.enums import OpenDssModelState -from zepben.eas.lib.generated_graphql_client import WorkPackageInput, ForecastConfigInput, TimePeriodInput, \ +from zepben.eas import EasClient, OpenDssModelState, WorkPackageInput, ForecastConfigInput, TimePeriodInput, \ FeederConfigInput, FeederConfigsInput, FixedTimeInput, FixedTimeLoadOverrideInput, TimePeriodLoadOverrideInput, \ StudyInput, StudyResultInput, InterventionConfigInput, YearRangeInput, InterventionClass, \ CandidateGenerationConfigInput, CandidateGenerationType, HcGeneratorConfigInput, HcModelConfigInput, \ diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index 911cc9f..6adca14 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -5,7 +5,10 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
import asyncio -from zepben.eas import EasClient +import pytest + +from zepben.eas import EasClient, OpenDssModelInput, OpenDssModelGenerationSpecInput, OpenDssModelOptionsInput, \ + OpenDssModulesConfigInput, OpenDssCommonConfigInput def test_can_connect_to_local_eas_non_async(): @@ -15,16 +18,40 @@ def test_can_connect_to_local_eas_non_async(): protocol="http", verify_certificate=False, asynchronous=False, + enable_legacy_methods=True, ) assert client.get_ingestor_run_list() == {'data': {'listIngestorRuns': []}} -def test_can_connect_to_local_eas_async(): +def test_can_connect_to_local_eas_async_asyncio_run_calling(): client = EasClient( host="localhost", port=7654, protocol="http", verify_certificate=False, - asynchronous=True + asynchronous=True, + enable_legacy_methods = True, ) assert asyncio.run(client.get_ingestor_run_list()) == {'data': {'listIngestorRuns': []}} + + +@pytest.mark.asyncio +async def test_can_connect_to_local_eas_async_calling_func(): + client = EasClient( + host="localhost", + port=7654, + protocol="http", + verify_certificate=False, + asynchronous=True, + enable_legacy_methods=True, + ) + assert await client.get_ingestor_run_list() == {'data': {'listIngestorRuns': []}} + print(await client.run_opendss_export( + OpenDssModelInput( + generationSpec=OpenDssModelGenerationSpecInput( + modelOptions=OpenDssModelOptionsInput(feeder='feeder', scenario='foo', year=1), + modulesConfiguration=OpenDssModulesConfigInput(common=OpenDssCommonConfigInput()), + ) + ) + )) + diff --git a/test/test_patched_client.py b/test/test_patched_client.py index fd82fdd..e6f2f90 100644 --- a/test/test_patched_client.py +++ b/test/test_patched_client.py @@ -4,9 +4,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-from zepben.eas import EasClient, FeederLoadAnalysisReportFields -from zepben.eas.client.patched_generated_client import PatchedClient -from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisSpecFields +from zepben.eas import FeederLoadAnalysisReportFields, EasClient, FeederLoadAnalysisSpecFields class MockResponse: @@ -23,7 +21,7 @@ def is_success(self): def test_patched_client_used_in_eas_client(): client = EasClient(host="test_host", port=9876) - assert isinstance(client, PatchedClient) + assert isinstance(client, EasClient) def test_patched_client_overrides_get_data_to_return_the_whole_json_response(): From 98f64dac5e831cd8ddc5eba2025b9a3afbca43f2 Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:05:04 +1100 Subject: [PATCH 17/32] Added details on installing code gen for non python people. Signed-off-by: Anthony Charlton --- README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index abc895b..b0f6b21 100644 --- a/README.md +++ b/README.md @@ -167,8 +167,14 @@ This will enable all `deprecated` and `opt_in` methods on the class, they are di # Development # -To regenerate the graphql client you will need to install `zepben.eas` with `eas-codegen` optional dependencies, then run: +To regenerate the graphql client you will need to install `zepben.eas` with `eas-codegen` optional dependencies: + +```shell +pip install zepben.eas[eas-codegen] +``` + +With these installed and EAS running locally on port 7654, you can then generate the client: ```shell ariadne-codegen -``` \ No newline at end of file +``` From 7118afad402ca4cab741ae45f6b5cc7c47adf7bc Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:05:28 +1100 Subject: [PATCH 18/32] Added missing licence headers. 
Signed-off-by: Anthony Charlton --- src/zepben/eas/lib/__init__.py | 4 ++++ src/zepben/eas/lib/generated_graphql_client/__init__.py | 6 ++++++ .../eas/lib/generated_graphql_client/async_base_client.py | 6 ++++++ src/zepben/eas/lib/generated_graphql_client/base_model.py | 6 ++++++ .../eas/lib/generated_graphql_client/base_operation.py | 6 ++++++ .../eas/lib/generated_graphql_client/base_operation.pyi | 4 ++++ src/zepben/eas/lib/generated_graphql_client/client.py | 6 ++++++ .../eas/lib/generated_graphql_client/custom_fields.py | 6 ++++++ .../eas/lib/generated_graphql_client/custom_mutations.py | 6 ++++++ .../eas/lib/generated_graphql_client/custom_queries.py | 6 ++++++ .../lib/generated_graphql_client/custom_typing_fields.py | 6 ++++++ src/zepben/eas/lib/generated_graphql_client/enums.py | 6 ++++++ src/zepben/eas/lib/generated_graphql_client/exceptions.py | 6 ++++++ src/zepben/eas/lib/generated_graphql_client/input_types.py | 6 ++++++ 14 files changed, 80 insertions(+) diff --git a/src/zepben/eas/lib/__init__.py b/src/zepben/eas/lib/__init__.py index 4a7146f..a717dcd 100644 --- a/src/zepben/eas/lib/__init__.py +++ b/src/zepben/eas/lib/__init__.py @@ -3,3 +3,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
diff --git a/src/zepben/eas/lib/generated_graphql_client/__init__.py b/src/zepben/eas/lib/generated_graphql_client/__init__.py index 24681c4..dac28ac 100644 --- a/src/zepben/eas/lib/generated_graphql_client/__init__.py +++ b/src/zepben/eas/lib/generated_graphql_client/__init__.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from .async_base_client import AsyncBaseClient from .base_model import BaseModel, Upload from .client import Client diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py index 9eb804a..b212a17 100644 --- a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + import asyncio import enum import json diff --git a/src/zepben/eas/lib/generated_graphql_client/base_model.py b/src/zepben/eas/lib/generated_graphql_client/base_model.py index 68e2f9e..2086ef4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_model.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_model.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ from io import IOBase from pydantic import BaseModel as PydanticBaseModel diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py index 65708d7..ea19037 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, Optional, Union from graphql import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi index fca940a..baf331b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi @@ -3,6 +3,10 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. from _typeshed import Incomplete from types import MethodType diff --git a/src/zepben/eas/lib/generated_graphql_client/client.py b/src/zepben/eas/lib/generated_graphql_client/client.py index bb11629..f4c4d83 100644 --- a/src/zepben/eas/lib/generated_graphql_client/client.py +++ b/src/zepben/eas/lib/generated_graphql_client/client.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any from graphql import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py index 8ef01d9..8851350 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, Optional, Union from . import SerializationType diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py index 9566591..4d5953d 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, Optional from .custom_fields import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index 7623f4c..6f6e290 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, Optional from .custom_fields import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py index 3cd62f6..2c55e85 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from .base_operation import GraphQLField diff --git a/src/zepben/eas/lib/generated_graphql_client/enums.py b/src/zepben/eas/lib/generated_graphql_client/enums.py index 5a153a4..f5ef70b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/enums.py +++ b/src/zepben/eas/lib/generated_graphql_client/enums.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from enum import Enum diff --git a/src/zepben/eas/lib/generated_graphql_client/exceptions.py b/src/zepben/eas/lib/generated_graphql_client/exceptions.py index e217e9b..2543235 100644 --- a/src/zepben/eas/lib/generated_graphql_client/exceptions.py +++ b/src/zepben/eas/lib/generated_graphql_client/exceptions.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ from typing import Any, Optional, Union import httpx diff --git a/src/zepben/eas/lib/generated_graphql_client/input_types.py b/src/zepben/eas/lib/generated_graphql_client/input_types.py index be71ecd..dfe636c 100644 --- a/src/zepben/eas/lib/generated_graphql_client/input_types.py +++ b/src/zepben/eas/lib/generated_graphql_client/input_types.py @@ -1,3 +1,9 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + from typing import Any, Optional from pydantic import Field From 3cdd4b61e0891eac51bdf369d4404e363eacc985 Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:10:41 +1100 Subject: [PATCH 19/32] Fixed duplicate licence headers. Signed-off-by: Anthony Charlton --- src/zepben/eas/lib/__init__.py | 4 ---- .../eas/lib/generated_graphql_client/base_operation.pyi | 4 ---- 2 files changed, 8 deletions(-) diff --git a/src/zepben/eas/lib/__init__.py b/src/zepben/eas/lib/__init__.py index a717dcd..4a7146f 100644 --- a/src/zepben/eas/lib/__init__.py +++ b/src/zepben/eas/lib/__init__.py @@ -3,7 +3,3 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi index baf331b..fca940a 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi @@ -3,10 +3,6 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. from _typeshed import Incomplete from types import MethodType From e809895c0fb38af7cd88fcd9f2cebdc0d9553bec Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:58:16 +1100 Subject: [PATCH 20/32] Added missing deprecation warning Signed-off-by: Anthony Charlton --- src/zepben/eas/client/eas_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 111e0f2..4cd1c1c 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -252,7 +252,7 @@ def cancel_hosting_capacity_work_package(self, work_package_id: str): Mutation.cancel_work_package(work_package_id=work_package_id), ) - @deprecated + @deprecated("Use query()/mutation() methods directly instead.") @catch_warnings @opt_in def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this info not returned by get_work_package_by_id ? 
From 59fdd5600f4e2934b70ac371f4a203f4152bf314 Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:58:44 +1100 Subject: [PATCH 21/32] Swapped to use newer methods for starting asyncio loops Signed-off-by: Anthony Charlton --- src/zepben/eas/client/decorators.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index c705f1e..fb0c321 100644 --- a/src/zepben/eas/client/decorators.py +++ b/src/zepben/eas/client/decorators.py @@ -6,9 +6,9 @@ __all__ = ['catch_warnings', 'async_func', 'opt_in', "add_method_to"] +import asyncio import functools import warnings -from asyncio import get_event_loop from typing import Callable @@ -27,7 +27,11 @@ def async_func(func: Callable) -> Callable: def wrapper(self, *args, **kwargs): if self._asynchronous: return func(self, *args, **kwargs) - return get_event_loop().run_until_complete(func(self, *args, **kwargs)) + + try: + return asyncio.get_running_loop().run_until_complete(func(self, *args, **kwargs)) + except RuntimeError: + return asyncio.run(func(self, *args, **kwargs)) return wrapper def add_method_to(class_to_extend: type) -> Callable: From 7f59b12c28bb5befcbd629a7ed35d978d9107e53 Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:58:59 +1100 Subject: [PATCH 22/32] Disabled integration tests for local debugging. 
Signed-off-by: Anthony Charlton --- test/test_integration_testing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index 6adca14..50deea5 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -11,6 +11,7 @@ OpenDssModulesConfigInput, OpenDssCommonConfigInput +@pytest.mark.skip("Local testing if you really want it...") def test_can_connect_to_local_eas_non_async(): client = EasClient( host="localhost", @@ -23,6 +24,7 @@ def test_can_connect_to_local_eas_non_async(): assert client.get_ingestor_run_list() == {'data': {'listIngestorRuns': []}} +@pytest.mark.skip("Local testing if you really want it...") def test_can_connect_to_local_eas_async_asyncio_run_calling(): client = EasClient( host="localhost", @@ -35,6 +37,7 @@ def test_can_connect_to_local_eas_async_asyncio_run_calling(): assert asyncio.run(client.get_ingestor_run_list()) == {'data': {'listIngestorRuns': []}} +@pytest.mark.skip("Local testing if you really want it...") @pytest.mark.asyncio async def test_can_connect_to_local_eas_async_calling_func(): client = EasClient( @@ -54,4 +57,3 @@ async def test_can_connect_to_local_eas_async_calling_func(): ) ) )) - From 5c73dac942f675310f634baca3c3d6a550e9d85a Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 15:59:28 +1100 Subject: [PATCH 23/32] Removed unused import Signed-off-by: Anthony Charlton --- src/zepben/eas/lib/generated_graphql_client/base_operation.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi index fca940a..7a3a603 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi @@ -6,7 +6,7 @@ from _typeshed import Incomplete from types import MethodType -from typing import 
Generator, Any +from typing import Generator def __getattr__(name) -> Incomplete: ... From 7c1731f2015daed54534904ba67bff4a569c4549 Mon Sep 17 00:00:00 2001 From: Anthony Charlton Date: Mon, 30 Mar 2026 17:10:24 +1100 Subject: [PATCH 24/32] WIP - GraphQL Query Typing Signed-off-by: Anthony Charlton --- .../custom_queries.pyi | 30 +++++++++++++++++++ test/test_integration_testing.py | 19 ++++++++++-- 2 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi new file mode 100644 index 0000000..4db764c --- /dev/null +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi @@ -0,0 +1,30 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +from typing import Generic, TypeVar, Optional + +from zepben.eas import IngestionRunGraphQLField, IngestionRunFields, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput + +TGraphQLQueryField = TypeVar("TGraphQLQueryField") +TGraphQLField = TypeVar("TGraphQLField") + + +class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): + ... + + def fields(self, *fields: TGraphQLField): + ... + + +class Query: + + @classmethod + def list_ingestor_runs( + cls, + *, + filter_: Optional[IngestorRunsFilterInput] = None, + sort: Optional[IngestorRunsSortCriteriaInput] = None + ) -> GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]: + ... diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index 50deea5..c359bb2 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -4,11 +4,12 @@ # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. import asyncio +from typing import TypeVar import pytest from zepben.eas import EasClient, OpenDssModelInput, OpenDssModelGenerationSpecInput, OpenDssModelOptionsInput, \ - OpenDssModulesConfigInput, OpenDssCommonConfigInput + OpenDssModulesConfigInput, OpenDssCommonConfigInput, Query, IngestionRunFields, GraphQLQuery, IngestionJobFields @pytest.mark.skip("Local testing if you really want it...") @@ -32,7 +33,7 @@ def test_can_connect_to_local_eas_async_asyncio_run_calling(): protocol="http", verify_certificate=False, asynchronous=True, - enable_legacy_methods = True, + enable_legacy_methods=True, ) assert asyncio.run(client.get_ingestor_run_list()) == {'data': {'listIngestorRuns': []}} @@ -57,3 +58,17 @@ async def test_can_connect_to_local_eas_async_calling_func(): ) ) )) + + +T = TypeVar("T") +R = TypeVar("R") + + +def my_query(query: GraphQLQuery[T, R], field: R, *additional_fields: R) -> T: + return query.fields(field, *additional_fields) + + +def test_do_things(): + my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) + my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) + my_query(Query.list_ingestor_runs(filter_=None, sort=None)) From e772d0af726b211d9bc07f6df5a2a784623e791e Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 31 Mar 2026 14:32:17 +1100 Subject: [PATCH 25/32] code gen via ast ftw Signed-off-by: Max Chesterfield --- .../custom_queries.pyi | 276 ++++++++++++++++-- .../custom_queries.pyi.bak | 24 ++ test/test_integration_testing.py | 70 ++++- 3 files changed, 345 insertions(+), 25 deletions(-) create mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi index 
4db764c..67d94b8 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi @@ -1,30 +1,264 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from typing import Generic, TypeVar, Optional - -from zepben.eas import IngestionRunGraphQLField, IngestionRunFields, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput - -TGraphQLQueryField = TypeVar("TGraphQLQueryField") -TGraphQLField = TypeVar("TGraphQLField") - +from typing import Any, Optional +from .custom_fields import AppOptionsFields, CustomerDetailsResponseFields, DurationCurveByTerminalFields, FeederLoadAnalysisReportFields, GqlTxTapRecordFields, GqlUserFields, GqlUserResponseFields, HcCalibrationFields, HcScenarioConfigsPageFields, HcWorkPackageFields, HcWorkPackagePageFields, IngestionJobFields, IngestionRunFields, IngestorRunPageFields, JobSourceFields, MachineUserFields, MetricFields, NetworkModelsFields, OpenDssModelPageFields, OpportunitiesByYearFields, OpportunityFields, OpportunityLocationFields, PowerFactoryModelFields, PowerFactoryModelPageFields, PowerFactoryModelTemplateFields, PowerFactoryModelTemplatePageFields, ProcessedDiffFields, ProcessedDiffPageFields, SincalGlobalInputsConfigFields, SincalModelFields, SincalModelPageFields, SincalModelPresetFields, SincalModelPresetPageFields, StudyFields, StudyPageFields, StudyResultFields, UploadUrlResponseFields, UserCustomerListColumnConfigFields, VariantWorkPackageFields, WorkPackageTreeFields +from .custom_typing_fields import GraphQLField +from .input_types import GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput, GetPowerFactoryModelsFilterInput, GetPowerFactoryModelsSortCriteriaInput, GetPowerFactoryModelTemplatesFilterInput, 
GetPowerFactoryModelTemplatesSortCriteriaInput, GetSincalModelPresetsFilterInput, GetSincalModelPresetsSortCriteriaInput, GetSincalModelsFilterInput, GetSincalModelsSortCriteriaInput, GetStudiesFilterInput, GetStudiesSortCriteriaInput, HcScenarioConfigsFilterInput, HcWorkPackagesFilterInput, HcWorkPackagesSortCriteriaInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, ProcessedDiffFilterInput, ProcessedDiffSortCriteriaInput, WorkPackageInput +from . import HostingCapacityFileType, ContainerType, SincalFileType, VariantFileType, WorkflowStatus +from zepben.eas import StudyPageGraphQLField, StudyResultGraphQLField, StudyGraphQLField, StudyGraphQLField, GraphQLField, GqlUserResponseGraphQLField, PowerFactoryModelTemplatePageGraphQLField, PowerFactoryModelPageGraphQLField, PowerFactoryModelGraphQLField, PowerFactoryModelTemplateGraphQLField, PowerFactoryModelTemplateGraphQLField, PowerFactoryModelGraphQLField, GraphQLField, GqlUserGraphQLField, HcCalibrationGraphQLField, GraphQLField, DurationCurveByTerminalGraphQLField, OpportunitiesByYearGraphQLField, OpportunityGraphQLField, OpportunityGraphQLField, OpportunityLocationGraphQLField, HcScenarioConfigsPageGraphQLField, GqlTxTapRecordGraphQLField, HcWorkPackageGraphQLField, GraphQLField, WorkPackageTreeGraphQLField, HcWorkPackagePageGraphQLField, UploadUrlResponseGraphQLField, HcCalibrationGraphQLField, ProcessedDiffGraphQLField, ProcessedDiffPageGraphQLField, IngestionJobGraphQLField, GraphQLField, MetricGraphQLField, IngestionJobGraphQLField, JobSourceGraphQLField, SincalModelPresetPageGraphQLField, SincalModelPageGraphQLField, SincalModelGraphQLField, UploadUrlResponseGraphQLField, SincalGlobalInputsConfigGraphQLField, SincalModelPresetGraphQLField, SincalModelPresetGraphQLField, SincalModelGraphQLField, GraphQLField, GraphQLField, MachineUserGraphQLField, GraphQLField, GraphQLField, NetworkModelsGraphQLField, FeederLoadAnalysisReportGraphQLField, IngestionRunGraphQLField, IngestionRunGraphQLField, 
IngestorRunPageGraphQLField, OpenDssModelPageGraphQLField, UserCustomerListColumnConfigGraphQLField, UserCustomerListColumnConfigGraphQLField, CustomerDetailsResponseGraphQLField, CustomerDetailsResponseGraphQLField, AppOptionsGraphQLField, UploadUrlResponseGraphQLField, VariantWorkPackageGraphQLField +from typing import Generic, TypeVar +TGraphQLQueryField = TypeVar('TGraphQLQueryField') +TGraphQLField = TypeVar('TGraphQLField') class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): - ... def fields(self, *fields: TGraphQLField): ... - class Query: @classmethod - def list_ingestor_runs( - cls, - *, - filter_: Optional[IngestorRunsFilterInput] = None, - sort: Optional[IngestorRunsSortCriteriaInput] = None - ) -> GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]: - ... + def paged_studies(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetStudiesFilterInput]=None, sort: Optional[GetStudiesSortCriteriaInput]=None) -> "GraphQLQuery[StudyPageFields, StudyPageGraphQLField]": + pass + + @classmethod + def results_by_id(cls, ids: list[str]) -> "GraphQLQuery[StudyResultFields, StudyResultGraphQLField]": + pass + + @classmethod + def studies(cls, *, filter_: Optional[GetStudiesFilterInput]=None) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": + pass + + @classmethod + def studies_by_id(cls, ids: list[str]) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": + pass + + @classmethod + def styles_by_id(cls, ids: list[str]) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def current_user(cls) -> "GraphQLQuery[GqlUserResponseFields, GqlUserResponseGraphQLField]": + pass + + @classmethod + def paged_power_factory_model_templates(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetPowerFactoryModelTemplatesFilterInput]=None, sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput]=None) -> "GraphQLQuery[PowerFactoryModelTemplatePageFields, 
PowerFactoryModelTemplatePageGraphQLField]": + pass + + @classmethod + def paged_power_factory_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetPowerFactoryModelsFilterInput]=None, sort: Optional[GetPowerFactoryModelsSortCriteriaInput]=None) -> "GraphQLQuery[PowerFactoryModelPageFields, PowerFactoryModelPageGraphQLField]": + pass + + @classmethod + def power_factory_model_by_id(cls, model_id: str) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": + pass + + @classmethod + def power_factory_model_template_by_id(cls, template_id: str) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": + pass + + @classmethod + def power_factory_model_templates_by_ids(cls, template_ids: list[str]) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": + pass + + @classmethod + def power_factory_models_by_ids(cls, model_ids: list[str]) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": + pass + + @classmethod + def get_active_work_packages(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_all_work_packages_authors(cls) -> "GraphQLQuery[GqlUserFields, GqlUserGraphQLField]": + pass + + @classmethod + def get_calibration_run(cls, id: str) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": + pass + + @classmethod + def get_calibration_sets(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_duration_curves(cls, work_package_id: str, scenario: str, feeder: str, year: int, conducting_equipment_mrid: str) -> "GraphQLQuery[DurationCurveByTerminalFields, DurationCurveByTerminalGraphQLField]": + pass + + @classmethod + def get_opportunities(cls, *, year: Optional[int]=None) -> "GraphQLQuery[OpportunitiesByYearFields, OpportunitiesByYearGraphQLField]": + pass + + @classmethod + def get_opportunities_for_equipment(cls, m_rid: str) -> 
"GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": + pass + + @classmethod + def get_opportunity(cls, id: str) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": + pass + + @classmethod + def get_opportunity_locations(cls, *, year: Optional[int]=None) -> "GraphQLQuery[OpportunityLocationFields, OpportunityLocationGraphQLField]": + pass + + @classmethod + def get_scenario_configurations(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[HcScenarioConfigsFilterInput]=None) -> "GraphQLQuery[HcScenarioConfigsPageFields, HcScenarioConfigsPageGraphQLField]": + pass + + @classmethod + def get_transformer_tap_settings(cls, calibration_name: str, *, feeder: Optional[str]=None, transformer_mrid: Optional[str]=None) -> "GraphQLQuery[GqlTxTapRecordFields, GqlTxTapRecordGraphQLField]": + pass + + @classmethod + def get_work_package_by_id(cls, id: str, *, with_groupings: Optional[bool]=None) -> "GraphQLQuery[HcWorkPackageFields, HcWorkPackageGraphQLField]": + pass + + @classmethod + def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_work_package_tree(cls, id: str) -> "GraphQLQuery[WorkPackageTreeFields, WorkPackageTreeGraphQLField]": + pass + + @classmethod + def get_work_packages(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[HcWorkPackagesFilterInput]=None, sort: Optional[HcWorkPackagesSortCriteriaInput]=None, with_groupings: Optional[bool]=None) -> "GraphQLQuery[HcWorkPackagePageFields, HcWorkPackagePageGraphQLField]": + pass + + @classmethod + def hosting_capacity_file_upload_url(cls, filename: str, file_type: HostingCapacityFileType) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass + + @classmethod + def list_calibration_runs(cls, *, name: Optional[str]=None, calibration_time: Optional[Any]=None, status: Optional[WorkflowStatus]=None) -> 
"GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": + pass + + @classmethod + def get_processed_diff(cls, diff_id: str) -> "GraphQLQuery[ProcessedDiffFields, ProcessedDiffGraphQLField]": + pass + + @classmethod + def get_processed_diffs(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[ProcessedDiffFilterInput]=None, sort: Optional[ProcessedDiffSortCriteriaInput]=None) -> "GraphQLQuery[ProcessedDiffPageFields, ProcessedDiffPageGraphQLField]": + pass + + @classmethod + def get_all_jobs(cls) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": + pass + + @classmethod + def get_distinct_metric_names(cls, job_id: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_metrics(cls, job_id: str, container_type: ContainerType, container_id: str) -> "GraphQLQuery[MetricFields, MetricGraphQLField]": + pass + + @classmethod + def get_newest_job(cls) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": + pass + + @classmethod + def get_sources(cls, job_id: str) -> "GraphQLQuery[JobSourceFields, JobSourceGraphQLField]": + pass + + @classmethod + def paged_sincal_model_presets(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetSincalModelPresetsFilterInput]=None, sort: Optional[GetSincalModelPresetsSortCriteriaInput]=None) -> "GraphQLQuery[SincalModelPresetPageFields, SincalModelPresetPageGraphQLField]": + pass + + @classmethod + def paged_sincal_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetSincalModelsFilterInput]=None, sort: Optional[GetSincalModelsSortCriteriaInput]=None) -> "GraphQLQuery[SincalModelPageFields, SincalModelPageGraphQLField]": + pass + + @classmethod + def sincal_model_by_id(cls, model_id: str) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": + pass + + @classmethod + def sincal_model_config_upload_url(cls, filename: str, file_type: SincalFileType) -> 
"GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass + + @classmethod + def sincal_model_global_config(cls) -> "GraphQLQuery[SincalGlobalInputsConfigFields, SincalGlobalInputsConfigGraphQLField]": + pass + + @classmethod + def sincal_model_preset_by_id(cls, preset_id: str) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": + pass + + @classmethod + def sincal_model_presets_by_ids(cls, preset_ids: list[str]) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": + pass + + @classmethod + def sincal_models_by_ids(cls, model_ids: list[str]) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": + pass + + @classmethod + def create_machine_api_key(cls, roles: list[str], token_name: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def create_user_api_key(cls, roles: list[str], token_name: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_machine_tokens(cls) -> "GraphQLQuery[MachineUserFields, MachineUserGraphQLField]": + pass + + @classmethod + def get_public_geo_view_config(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_all_external_roles(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_network_models(cls) -> "GraphQLQuery[NetworkModelsFields, NetworkModelsGraphQLField]": + pass + + @classmethod + def get_feeder_load_analysis_report_status(cls, report_id: str, full_spec: bool) -> "GraphQLQuery[FeederLoadAnalysisReportFields, FeederLoadAnalysisReportGraphQLField]": + pass + + @classmethod + def get_ingestor_run(cls, id: int) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": + pass + + @classmethod + def list_ingestor_runs(cls, *, filter_: Optional[IngestorRunsFilterInput]=None, sort: Optional[IngestorRunsSortCriteriaInput]=None) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": + pass + + @classmethod + def 
list_ingestor_runs_paged(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[IngestorRunsFilterInput]=None, sort: Optional[IngestorRunsSortCriteriaInput]=None) -> "GraphQLQuery[IngestorRunPageFields, IngestorRunPageGraphQLField]": + pass + + @classmethod + def paged_open_dss_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetOpenDssModelsFilterInput]=None, sort: Optional[GetOpenDssModelsSortCriteriaInput]=None) -> "GraphQLQuery[OpenDssModelPageFields, OpenDssModelPageGraphQLField]": + pass + + @classmethod + def get_user_permitted_customer_list_column_config(cls) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": + pass + + @classmethod + def get_user_saved_customer_list_column_config(cls) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": + pass + + @classmethod + def get_customer_list(cls, m_ri_ds: list[str]) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": + pass + + @classmethod + def get_customer_list_by_nmis(cls, nmis: list[str]) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": + pass + + @classmethod + def get_app_options(cls) -> "GraphQLQuery[AppOptionsFields, AppOptionsGraphQLField]": + pass + + @classmethod + def get_presigned_upload_url_for_variant(cls, filename: str, file_type: VariantFileType) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass + + @classmethod + def get_variant_upload_info(cls, job_id: str) -> "GraphQLQuery[VariantWorkPackageFields, VariantWorkPackageGraphQLField]": + pass \ No newline at end of file diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak new file mode 100644 index 0000000..341d379 --- /dev/null +++ 
b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak @@ -0,0 +1,24 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +from _typeshed import Incomplete +from typing import Generic, TypeVar, Optional + +from zepben.eas import IngestionRunGraphQLField, IngestionRunFields, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput + +TGraphQLQueryField = TypeVar("TGraphQLQueryField") +TGraphQLField = TypeVar("TGraphQLField") + +def __getattr__(name) -> Incomplete: ... + +class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): + def fields(self, *fields: TGraphQLField): ... + + +class Query: + def __getattr__(self, name: str) -> Incomplete: ... + + @classmethod + def list_ingestor_runs(cls, *, filter_: Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None ) -> GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]: ... diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index c359bb2..5615ff7 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -3,14 +3,17 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+import ast import asyncio -from typing import TypeVar +from typing import TypeVar, TYPE_CHECKING import pytest from zepben.eas import EasClient, OpenDssModelInput, OpenDssModelGenerationSpecInput, OpenDssModelOptionsInput, \ - OpenDssModulesConfigInput, OpenDssCommonConfigInput, Query, IngestionRunFields, GraphQLQuery, IngestionJobFields + OpenDssModulesConfigInput, OpenDssCommonConfigInput, Query, IngestionRunFields, IngestionJobFields +if TYPE_CHECKING: + from zepben.eas import GraphQLQuery @pytest.mark.skip("Local testing if you really want it...") def test_can_connect_to_local_eas_non_async(): @@ -64,11 +67,70 @@ async def test_can_connect_to_local_eas_async_calling_func(): R = TypeVar("R") -def my_query(query: GraphQLQuery[T, R], field: R, *additional_fields: R) -> T: +def my_query(query: "GraphQLQuery[T, R]", field: R, *additional_fields: R) -> T: return query.fields(field, *additional_fields) +ast.ClassDef( + "MyClass", + [], + [], + [ + ast.FunctionDef( + "MyFunction", + ast.arguments( + posonlyargs=[], + args=[], + vararg=None, + kwonlyargs=[], + kw_defaults=[], + defaults=[], + ), + [], + [], + None, + ) + ], + [], +) def test_do_things(): + from zepben.eas.lib.generated_graphql_client import custom_queries + with open(custom_queries.__file__) as f: + orig_ast = ast.parse( + f.read(), + ) + n = None + extra_imports = [] + + for i, b in enumerate(orig_ast.body): + if isinstance(b, ast.ClassDef): + if n is None: + n = i + for func in b.body: + if isinstance(func, ast.FunctionDef): + func.body = [ast.Pass()] + extra_imports.append(func.returns.id.replace("Fields", "GraphQLField")) + func.returns = ast.Name(f'\"GraphQLQuery[{func.returns.id}, {func.returns.id.replace("Fields", "GraphQLField")}]\"') + + orig_ast.body.insert(n, ast.parse( + """ +class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): + def fields(self, *fields: TGraphQLField): ... 
+ """ + ).body[0]) + orig_ast.body.insert(n, ast.parse( + 'TGraphQLField = TypeVar("TGraphQLField")' + ).body[0]) + orig_ast.body.insert(n, ast.parse( + 'TGraphQLQueryField = TypeVar("TGraphQLQueryField")' + ).body[0]) + + orig_ast.body.insert(n, ast.parse("from typing import Generic, TypeVar").body[0]) + orig_ast.body.insert(n, ast.parse(f"from zepben.eas import {', '.join(extra_imports)}").body[0]) + + with open(custom_queries.__file__ + 'i', 'w') as f: + f.write(ast.unparse(orig_ast)) + my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) - my_query(Query.list_ingestor_runs(filter_=None, sort=None)) + # my_query(Query.list_ingestor_runs(filter_=None, sort=None)) From 58ca2fddf86f50fd02f476c9ea617f420e7d3630 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Tue, 31 Mar 2026 14:52:16 +1100 Subject: [PATCH 26/32] try that Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 13 ++++++--- test/test_integration_testing.py | 43 ++++++----------------------- 2 files changed, 18 insertions(+), 38 deletions(-) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 4cd1c1c..8438c90 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -21,7 +21,7 @@ from datetime import datetime from http import HTTPStatus from types import MethodType -from typing import Any, Generator, cast +from typing import Any, Generator, cast, TypeVar, TYPE_CHECKING import httpx from graphql import OperationType @@ -38,6 +38,12 @@ from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation from zepben.eas.lib.generated_graphql_client.custom_queries import Query +if TYPE_CHECKING: + from zepben.eas import GraphQLQuery + +T = TypeVar("T") +R = TypeVar("R") + # noinspection PyDecorator,PyNestedDecorators 
@add_method_to(GraphQLField) @@ -130,10 +136,9 @@ def __init__( async def close(self): await self.http_client.aclose() - @async_func - async def query(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: + async def do_query(self, query: GraphQLQuery[T, R], field: R, *additional_fields: R, operation_name: str = None) -> T: """Execute a query against the Evolve App Server.""" - return await super().query(*fields, operation_name=operation_name) + return await super().query(query.fields(field, *additional_fields), operation_name=operation_name) @async_func async def mutation(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index 5615ff7..f20fbf8 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -62,38 +62,8 @@ async def test_can_connect_to_local_eas_async_calling_func(): ) )) - -T = TypeVar("T") -R = TypeVar("R") - - -def my_query(query: "GraphQLQuery[T, R]", field: R, *additional_fields: R) -> T: - return query.fields(field, *additional_fields) - -ast.ClassDef( - "MyClass", - [], - [], - [ - ast.FunctionDef( - "MyFunction", - ast.arguments( - posonlyargs=[], - args=[], - vararg=None, - kwonlyargs=[], - kw_defaults=[], - defaults=[], - ), - [], - [], - None, - ) - ], - [], -) - -def test_do_things(): +@pytest.mark.asyncio +async def test_do_things(): from zepben.eas.lib.generated_graphql_client import custom_queries with open(custom_queries.__file__) as f: orig_ast = ast.parse( @@ -131,6 +101,11 @@ def fields(self, *fields: TGraphQLField): ... 
with open(custom_queries.__file__ + 'i', 'w') as f: f.write(ast.unparse(orig_ast)) - my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) - my_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) + client = EasClient( + host="localhost", + port=7654, + asynchronous=True + ) + await client.do_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) + await client.do_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) # my_query(Query.list_ingestor_runs(filter_=None, sort=None)) From 375a30d74c668ae05143ea7de73e38d62a0fdfa4 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 11:59:44 +1100 Subject: [PATCH 27/32] :shrug: anthony said "it is that simple" Signed-off-by: Max Chesterfield --- pyproject.toml | 3 + src/zepben/eas/generate_client.py | 170 ++++ .../eas/lib/ariadne_plugins/__init__.py | 5 + .../custom_query_type_hinter.py | 44 + .../lib/generated_graphql_client/__init__.py | 6 - .../async_base_client.py | 20 +- .../generated_graphql_client/base_model.py | 6 - .../base_operation.py | 6 - .../lib/generated_graphql_client/client.py | 8 +- .../generated_graphql_client/custom_fields.py | 44 +- .../custom_mutations.py | 18 +- .../custom_queries.py | 929 +++++------------- .../custom_typing_fields.py | 6 - .../eas/lib/generated_graphql_client/enums.py | 6 - .../generated_graphql_client/exceptions.py | 6 - .../generated_graphql_client/input_types.py | 6 - test/test_client_generation.py | 47 + test/test_integration_testing.py | 40 +- 18 files changed, 561 insertions(+), 809 deletions(-) create mode 100644 src/zepben/eas/generate_client.py create mode 100644 src/zepben/eas/lib/ariadne_plugins/__init__.py create mode 100644 src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py create mode 100644 test/test_client_generation.py diff --git 
a/pyproject.toml b/pyproject.toml index 2162967..26d6aa2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,3 +64,6 @@ target_package_path='src/zepben/eas/lib' target_package_name='generated_graphql_client' introspection_descriptions=true introspection_input_value_deprecations=true +plugins=[ + "zepben.eas.lib.ariadne_plugins.custom_query_type_hinter.CustomQueryTypeHinterPlugin" +] diff --git a/src/zepben/eas/generate_client.py b/src/zepben/eas/generate_client.py new file mode 100644 index 0000000..d140536 --- /dev/null +++ b/src/zepben/eas/generate_client.py @@ -0,0 +1,170 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +import ast +import sys +from typing import Optional + +import click +import ariadne_codegen.client_generators.custom_operation +from ariadne_codegen.plugins.manager import PluginManager +from graphql import assert_valid_schema + + +class ZBPatchedPluginManager(PluginManager): + def generate_custom_module(self, module: ast.Module) -> ast.Module: + return self._apply_plugins_on_object("generate_custom_module", module) + + def generate_custom_method(self, module: ast.FunctionDef) -> ast.FunctionDef: + return self._apply_plugins_on_object("generate_custom_method", module) + + +class ZBPatchedCustomOperationGenerator(ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator): + plugin_manager: ZBPatchedPluginManager + + def _generate_method( + self, operation_name: str, operation_args, final_type, description: Optional[str] = None + ) -> ast.FunctionDef: + return self.plugin_manager.generate_custom_method( + super()._generate_method( + operation_name, + operation_args, + final_type, + description, + ) + ) + + def generate(self) -> ast.Module: + return self.plugin_manager.generate_custom_module( + super().generate( + ) + ) + 
+ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator = ZBPatchedCustomOperationGenerator + +from ariadne_codegen.client_generators.package import get_package_generator +from ariadne_codegen.config import get_client_settings, get_config_dict, get_graphql_schema_settings +from ariadne_codegen.graphql_schema_generators.schema import ( + generate_graphql_schema_graphql_file, + generate_graphql_schema_python_file, +) +from ariadne_codegen.plugins.explorer import get_plugins_types +from ariadne_codegen.schema import ( + add_mixin_directive_to_schema, + filter_fragments_definitions, + filter_operations_definitions, + get_graphql_queries, + get_graphql_schema_from_path, + get_graphql_schema_from_url, +) +from ariadne_codegen.settings import Strategy, get_validation_rule + + +@click.command() +@click.version_option() +@click.option("--config", default=None, help="Path to custom configuration file.") +@click.argument( + "strategy", + default=Strategy.CLIENT.value, + type=click.Choice([e.value for e in Strategy]), + required=False, +) +def main(strategy=Strategy.CLIENT.value, config=None): + config_dict = get_config_dict(config) + if strategy == Strategy.CLIENT: + client(config_dict) + + if strategy == Strategy.GRAPHQL_SCHEMA: + graphql_schema(config_dict) + + +def client(config_dict): + settings = get_client_settings(config_dict) + + if settings.schema_path: + schema = get_graphql_schema_from_path(settings.schema_path) + else: + schema = get_graphql_schema_from_url( + url=settings.remote_schema_url, + headers=settings.remote_schema_headers, + verify_ssl=settings.remote_schema_verify_ssl, + timeout=settings.remote_schema_timeout, + introspection_settings=settings.introspection_settings, + ) + + plugin_manager = ZBPatchedPluginManager( + schema=schema, + config_dict=config_dict, + plugins_types=get_plugins_types(settings.plugins), + ) + schema = add_mixin_directive_to_schema(schema) + schema = plugin_manager.process_schema(schema) + 
assert_valid_schema(schema) + + fragments = [] + queries = [] + if settings.queries_path: + definitions = get_graphql_queries( + settings.queries_path, + schema, + [get_validation_rule(e) for e in settings.skip_validation_rules], + ) + queries = filter_operations_definitions(definitions) + fragments = filter_fragments_definitions(definitions) + + sys.stdout.write(settings.used_settings_message) + + package_generator = get_package_generator( + schema=schema, + fragments=fragments, + settings=settings, + plugin_manager=plugin_manager, + ) + for query in queries: + package_generator.add_operation(query) + generated_files = package_generator.generate() + + sys.stdout.write("\nGenerated files:\n " + "\n ".join(generated_files) + "\n") + + +def graphql_schema(config_dict): + settings = get_graphql_schema_settings(config_dict) + + schema = ( + get_graphql_schema_from_path(settings.schema_path) + if settings.schema_path + else get_graphql_schema_from_url( + url=settings.remote_schema_url, + headers=settings.remote_schema_headers, + verify_ssl=settings.remote_schema_verify_ssl, + timeout=settings.remote_schema_timeout, + introspection_settings=settings.introspection_settings, + ) + ) + plugin_manager = ZBPatchedPluginManager( + schema=schema, + config_dict=config_dict, + plugins_types=get_plugins_types(settings.plugins), + ) + schema = plugin_manager.process_schema(schema) + assert_valid_schema(schema) + + sys.stdout.write(settings.used_settings_message) + + if settings.target_file_format == "py": + generate_graphql_schema_python_file( + schema=schema, + target_file_path=settings.target_file_path, + type_map_name=settings.type_map_variable_name, + schema_variable_name=settings.schema_variable_name, + ) + else: + generate_graphql_schema_graphql_file( + schema=schema, + target_file_path=settings.target_file_path, + ) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/zepben/eas/lib/ariadne_plugins/__init__.py 
b/src/zepben/eas/lib/ariadne_plugins/__init__.py new file mode 100644 index 0000000..4a7146f --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/__init__.py @@ -0,0 +1,5 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. diff --git a/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py new file mode 100644 index 0000000..287c065 --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py @@ -0,0 +1,44 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +import ast +from ariadne_codegen.plugins.base import Plugin + +class CustomQueryTypeHinterPlugin(Plugin): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._current_class = None + self._all_imports = set() + + def generate_custom_method(self, method: ast.FunctionDef) -> ast.FunctionDef: + return method + + def generate_custom_module(self, module: ast.Module, **kwargs) -> ast.Module: + for b in module.body: + if isinstance((class_def := b), ast.ClassDef): + # 1. 
Target a specific class (e.g., the root 'Query' result) + if class_def.name == "Query": + for method in class_def.body: + method.body = [ast.Pass()] + method.returns = ast.Name( + f'\"GraphQLQuery[{method.returns.id}, {method.returns.id.replace("Fields", "GraphQLField")}]\"' + ) + + print(next(b for b in module.body if isinstance(b, ast.ClassDef)).name) + return module + + def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: + if (iname := import_.names[0].name) in ( + 'SincalFileType', + 'VariantFileType', + 'ContainerType', + 'HostingCapacityFileType', + 'WorkflowStatus', + ): + if import_.module is None: + print(f"[ZBEX] Assuming class import {iname} is from module 'enums.py'") + import_.module = 'enums' + return import_ diff --git a/src/zepben/eas/lib/generated_graphql_client/__init__.py b/src/zepben/eas/lib/generated_graphql_client/__init__.py index dac28ac..24681c4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/__init__.py +++ b/src/zepben/eas/lib/generated_graphql_client/__init__.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from .async_base_client import AsyncBaseClient from .base_model import BaseModel, Upload from .client import Client diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py index b212a17..c642b77 100644 --- a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- import asyncio import enum import json @@ -341,15 +335,19 @@ async def _send_subscribe( operation_name: Optional[str] = None, variables: Optional[dict[str, Any]] = None, ) -> None: - payload: dict[str, Any] = { - "id": operation_id, - "type": GraphQLTransportWSMessageType.SUBSCRIBE.value, - "payload": {"query": query, "operationName": operation_name}, + payload_inner: dict[str, Any] = { + "query": query, + "operationName": operation_name, } if variables: - payload["payload"]["variables"] = self._convert_dict_to_json_serializable( + payload_inner["variables"] = self._convert_dict_to_json_serializable( variables ) + payload: dict[str, Any] = { + "id": operation_id, + "type": GraphQLTransportWSMessageType.SUBSCRIBE.value, + "payload": payload_inner, + } await websocket.send(json.dumps(payload)) async def _handle_ws_message( diff --git a/src/zepben/eas/lib/generated_graphql_client/base_model.py b/src/zepben/eas/lib/generated_graphql_client/base_model.py index 2086ef4..68e2f9e 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_model.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_model.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from io import IOBase from pydantic import BaseModel as PydanticBaseModel diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py index ea19037..65708d7 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. 
If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from typing import Any, Optional, Union from graphql import ( diff --git a/src/zepben/eas/lib/generated_graphql_client/client.py b/src/zepben/eas/lib/generated_graphql_client/client.py index f4c4d83..fe0a69f 100644 --- a/src/zepben/eas/lib/generated_graphql_client/client.py +++ b/src/zepben/eas/lib/generated_graphql_client/client.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from typing import Any from graphql import ( @@ -109,5 +103,5 @@ async def mutation( return await self.execute_custom_operation( *fields, operation_type=OperationType.MUTATION, - operation_name=operation_name + operation_name=operation_name, ) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py index 8851350..d7bb87b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from typing import Any, Optional, Union from . 
import SerializationType @@ -214,7 +208,7 @@ def customer_details(cls) -> "CustomerDetailsFields": def fields( self, - *subfields: Union[CustomerDetailsResponseGraphQLField, "CustomerDetailsFields"] + *subfields: Union[CustomerDetailsResponseGraphQLField, "CustomerDetailsFields"], ) -> "CustomerDetailsResponseFields": """Subfields should come from the CustomerDetailsResponseFields class""" self._subfields.extend(subfields) @@ -293,7 +287,7 @@ def duration_curve(cls) -> "DurationCurveFields": def fields( self, - *subfields: Union[DurationCurveByTerminalGraphQLField, "DurationCurveFields"] + *subfields: Union[DurationCurveByTerminalGraphQLField, "DurationCurveFields"], ) -> "DurationCurveByTerminalFields": """Subfields should come from the DurationCurveByTerminalFields class""" self._subfields.extend(subfields) @@ -393,7 +387,7 @@ def fields( self, *subfields: Union[ FeederLoadAnalysisReportGraphQLField, "FeederLoadAnalysisSpecFields" - ] + ], ) -> "FeederLoadAnalysisReportFields": """Subfields should come from the FeederLoadAnalysisReportFields class""" self._subfields.extend(subfields) @@ -487,7 +481,7 @@ def fields( GeoJsonFeatureGraphQLField, "GeoJsonGeometryFields", "GeoJsonPropertiesFields", - ] + ], ) -> "GeoJsonFeatureFields": """Subfields should come from the GeoJsonFeatureFields class""" self._subfields.extend(subfields) @@ -739,7 +733,7 @@ def fields( self, *subfields: Union[ HcScenarioConfigsPageGraphQLField, "ScenarioConfigurationFields" - ] + ], ) -> "HcScenarioConfigsPageFields": """Subfields should come from the HcScenarioConfigsPageFields class""" self._subfields.extend(subfields) @@ -788,7 +782,7 @@ def fields( HcWorkPackageGraphQLField, "GqlUserFields", "WorkPackageProgressDetailsFields", - ] + ], ) -> "HcWorkPackageFields": """Subfields should come from the HcWorkPackageFields class""" self._subfields.extend(subfields) @@ -1082,7 +1076,7 @@ def fields( self, *subfields: Union[ OpportunityGraphQLField, "EquipmentFields", "GeoJsonFeatureFields" - ] 
+ ], ) -> "OpportunityFields": """Subfields should come from the OpportunityFields class""" self._subfields.extend(subfields) @@ -1133,7 +1127,7 @@ def fields( self, *subfields: Union[ PowerFactoryModelGraphQLField, "PowerFactoryModelGenerationSpecFields" - ] + ], ) -> "PowerFactoryModelFields": """Subfields should come from the PowerFactoryModelFields class""" self._subfields.extend(subfields) @@ -1170,7 +1164,7 @@ def fields( "GqlDistributionTransformerConfigFields", "GqlLoadConfigFields", "GqlScenarioConfigFields", - ] + ], ) -> "PowerFactoryModelGenerationSpecFields": """Subfields should come from the PowerFactoryModelGenerationSpecFields class""" self._subfields.extend(subfields) @@ -1196,7 +1190,7 @@ def power_factory_models(cls) -> "PowerFactoryModelFields": def fields( self, - *subfields: Union[PowerFactoryModelPageGraphQLField, "PowerFactoryModelFields"] + *subfields: Union[PowerFactoryModelPageGraphQLField, "PowerFactoryModelFields"], ) -> "PowerFactoryModelPageFields": """Subfields should come from the PowerFactoryModelPageFields class""" self._subfields.extend(subfields) @@ -1231,7 +1225,7 @@ def fields( *subfields: Union[ PowerFactoryModelTemplateGraphQLField, "PowerFactoryModelGenerationSpecFields", - ] + ], ) -> "PowerFactoryModelTemplateFields": """Subfields should come from the PowerFactoryModelTemplateFields class""" self._subfields.extend(subfields) @@ -1259,7 +1253,7 @@ def fields( self, *subfields: Union[ PowerFactoryModelTemplatePageGraphQLField, "PowerFactoryModelTemplateFields" - ] + ], ) -> "PowerFactoryModelTemplatePageFields": """Subfields should come from the PowerFactoryModelTemplatePageFields class""" self._subfields.extend(subfields) @@ -1472,7 +1466,7 @@ def fields( self, *subfields: Union[ SincalGlobalInputsConfigGraphQLField, "SincalConfigFileFields" - ] + ], ) -> "SincalGlobalInputsConfigFields": """Subfields should come from the SincalGlobalInputsConfigFields class""" self._subfields.extend(subfields) @@ -1500,7 +1494,7 @@ def 
generation_spec(cls) -> "SincalModelGenerationSpecFields": def fields( self, - *subfields: Union[SincalModelGraphQLField, "SincalModelGenerationSpecFields"] + *subfields: Union[SincalModelGraphQLField, "SincalModelGenerationSpecFields"], ) -> "SincalModelFields": """Subfields should come from the SincalModelFields class""" self._subfields.extend(subfields) @@ -1577,7 +1571,7 @@ def fields( self, *subfields: Union[ SincalModelPresetGraphQLField, "SincalModelGenerationSpecFields" - ] + ], ) -> "SincalModelPresetFields": """Subfields should come from the SincalModelPresetFields class""" self._subfields.extend(subfields) @@ -1603,7 +1597,7 @@ def presets(cls) -> "SincalModelPresetFields": def fields( self, - *subfields: Union[SincalModelPresetPageGraphQLField, "SincalModelPresetFields"] + *subfields: Union[SincalModelPresetPageGraphQLField, "SincalModelPresetFields"], ) -> "SincalModelPresetPageFields": """Subfields should come from the SincalModelPresetPageFields class""" self._subfields.extend(subfields) @@ -1705,7 +1699,7 @@ def fields( "GeoJsonOverlayFields", "ResultSectionInterface", "StateOverlayFields", - ] + ], ) -> "StudyResultFields": """Subfields should come from the StudyResultFields class""" self._subfields.extend(subfields) @@ -1783,7 +1777,7 @@ def fields( self, *subfields: Union[ UserCustomerListColumnConfigGraphQLField, "CustomerListColumnConfigFields" - ] + ], ) -> "UserCustomerListColumnConfigFields": """Subfields should come from the UserCustomerListColumnConfigFields class""" self._subfields.extend(subfields) @@ -1933,7 +1927,7 @@ def fields( WorkPackageProgressDetailsGraphQLField, "WorkPackageModelGroupingsFields", "WorkPackageModelTotalsFields", - ] + ], ) -> "WorkPackageProgressDetailsFields": """Subfields should come from the WorkPackageProgressDetailsFields class""" self._subfields.extend(subfields) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py 
index 4d5953d..e8306e0 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_mutations.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from typing import Any, Optional from .custom_fields import ( @@ -12,6 +6,7 @@ UserCustomerListColumnConfigFields, ) from .custom_typing_fields import GraphQLField +from .enums import SincalFileType, VariantFileType from .input_types import ( AppOptionsInput, FeederLoadAnalysisInput, @@ -25,7 +20,6 @@ StudyInput, WorkPackageInput, ) -from . import SincalFileType, VariantFileType class Mutation: @@ -150,7 +144,7 @@ def edit_diff_package( diff_id: str, *, name: Optional[str] = None, - description: Optional[str] = None + description: Optional[str] = None, ) -> GraphQLField: """Edits a hosting capacity diff package and return boolean. Returns "true" on successful update""" arguments: dict[str, dict[str, Any]] = { @@ -169,7 +163,7 @@ def edit_work_package( work_package_id: str, *, name: Optional[str] = None, - description: Optional[str] = None + description: Optional[str] = None, ) -> GraphQLField: """Edits a hosting capacity work package and return boolean. 
Returns "true" on successful update""" arguments: dict[str, dict[str, Any]] = { @@ -194,7 +188,7 @@ def generate_enhanced_network_performance_diff( feeder: Optional[str] = None, year: Optional[int] = None, season: Optional[str] = None, - time_of_day: Optional[str] = None + time_of_day: Optional[str] = None, ) -> DiffResultFields: """Generate and store the differences of enhanced network performance metrics between two work packages and returns the number of entries generated with the ID of this diff package.""" arguments: dict[str, dict[str, Any]] = { @@ -226,7 +220,7 @@ def generate_network_performance_diff( diff_name: Optional[str] = None, scenario: Optional[str] = None, feeder: Optional[str] = None, - year: Optional[int] = None + year: Optional[int] = None, ) -> DiffResultFields: """Generate and store the differences of network performance metrics between two work packages and returns the number of entries generated with the ID of this diff package.""" arguments: dict[str, dict[str, Any]] = { @@ -265,7 +259,7 @@ def run_calibration( *, calibration_time_local: Optional[Any] = None, feeders: Optional[list[str]] = None, - generator_config: Optional[HcGeneratorConfigInput] = None + generator_config: Optional[HcGeneratorConfigInput] = None, ) -> GraphQLField: """Runs a calibration and returns a run ID.""" arguments: dict[str, dict[str, Any]] = { diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index 6f6e290..67de0a4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- from typing import Any, Optional from .custom_fields import ( @@ -49,6 +43,13 @@ WorkPackageTreeFields, ) from .custom_typing_fields import GraphQLField +from .enums import ( + ContainerType, + HostingCapacityFileType, + SincalFileType, + VariantFileType, + WorkflowStatus, +) from .input_types import ( GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput, @@ -71,7 +72,6 @@ ProcessedDiffSortCriteriaInput, WorkPackageInput, ) -from . import HostingCapacityFileType, ContainerType, SincalFileType, VariantFileType, WorkflowStatus class Query: @@ -82,62 +82,37 @@ def paged_studies( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetStudiesFilterInput] = None, - sort: Optional[GetStudiesSortCriteriaInput] = None - ) -> StudyPageFields: - """Retrieve a page of studies, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "GetStudiesFilterInput", "value": filter_}, - "sort": {"type": "GetStudiesSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return StudyPageFields(field_name="pagedStudies", arguments=cleared_arguments) - - @classmethod - def results_by_id(cls, ids: list[str]) -> StudyResultFields: - """Retrieve a list of results by IDs""" - arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return StudyResultFields(field_name="resultsById", arguments=cleared_arguments) - - @classmethod - def studies(cls, *, filter_: Optional[GetStudiesFilterInput] = None) -> StudyFields: - """Retrieve a list of studies, with optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "filter": {"type": "GetStudiesFilterInput", "value": filter_} - } - 
cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return StudyFields(field_name="studies", arguments=cleared_arguments) - - @classmethod - def studies_by_id(cls, ids: list[str]) -> StudyFields: - """Retrieve a list of studies by IDs""" - arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return StudyFields(field_name="studiesById", arguments=cleared_arguments) - - @classmethod - def styles_by_id(cls, ids: list[str]) -> GraphQLField: - """Retrieve a list of style layers by IDs""" - arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GraphQLField(field_name="stylesById", arguments=cleared_arguments) - - @classmethod - def current_user(cls) -> GqlUserResponseFields: - """Get information about the current user""" - return GqlUserResponseFields(field_name="currentUser") + sort: Optional[GetStudiesSortCriteriaInput] = None, + ) -> "GraphQLQuery[StudyPageFields, StudyPageGraphQLField]": + pass + + @classmethod + def results_by_id( + cls, ids: list[str] + ) -> "GraphQLQuery[StudyResultFields, StudyResultGraphQLField]": + pass + + @classmethod + def studies( + cls, *, filter_: Optional[GetStudiesFilterInput] = None + ) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": + pass + + @classmethod + def studies_by_id( + cls, ids: list[str] + ) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": + pass + + @classmethod + def styles_by_id(cls, ids: list[str]) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def current_user( + cls, + ) -> "GraphQLQuery[GqlUserResponseFields, GqlUserResponseGraphQLField]": + pass @classmethod def paged_power_factory_model_templates( @@ -146,27 +121,9 @@ def 
paged_power_factory_model_templates( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetPowerFactoryModelTemplatesFilterInput] = None, - sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput] = None - ) -> PowerFactoryModelTemplatePageFields: - """Retrieve a page of powerFactoryModel templates, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": { - "type": "GetPowerFactoryModelTemplatesFilterInput", - "value": filter_, - }, - "sort": { - "type": "GetPowerFactoryModelTemplatesSortCriteriaInput", - "value": sort, - }, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelTemplatePageFields( - field_name="pagedPowerFactoryModelTemplates", arguments=cleared_arguments - ) + sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput] = None, + ) -> "GraphQLQuery[PowerFactoryModelTemplatePageFields, PowerFactoryModelTemplatePageGraphQLField]": + pass @classmethod def paged_power_factory_models( @@ -175,105 +132,53 @@ def paged_power_factory_models( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetPowerFactoryModelsFilterInput] = None, - sort: Optional[GetPowerFactoryModelsSortCriteriaInput] = None - ) -> PowerFactoryModelPageFields: - """Retrieve a page of powerFactoryModels, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "GetPowerFactoryModelsFilterInput", "value": filter_}, - "sort": {"type": "GetPowerFactoryModelsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelPageFields( - 
field_name="pagedPowerFactoryModels", arguments=cleared_arguments - ) - - @classmethod - def power_factory_model_by_id(cls, model_id: str) -> PowerFactoryModelFields: - """Retrieve a powerFactoryModel by ID""" - arguments: dict[str, dict[str, Any]] = { - "modelId": {"type": "ID!", "value": model_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelFields( - field_name="powerFactoryModelById", arguments=cleared_arguments - ) + sort: Optional[GetPowerFactoryModelsSortCriteriaInput] = None, + ) -> "GraphQLQuery[PowerFactoryModelPageFields, PowerFactoryModelPageGraphQLField]": + pass + + @classmethod + def power_factory_model_by_id( + cls, model_id: str + ) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": + pass @classmethod def power_factory_model_template_by_id( cls, template_id: str - ) -> PowerFactoryModelTemplateFields: - """Retrieve a powerFactoryModel template by ID""" - arguments: dict[str, dict[str, Any]] = { - "templateId": {"type": "ID!", "value": template_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelTemplateFields( - field_name="powerFactoryModelTemplateById", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": + pass @classmethod def power_factory_model_templates_by_ids( cls, template_ids: list[str] - ) -> PowerFactoryModelTemplateFields: - """Retrieve a list of powerFactoryModel templates by IDs""" - arguments: dict[str, dict[str, Any]] = { - "templateIds": {"type": "[ID!]!", "value": template_ids} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelTemplateFields( - field_name="powerFactoryModelTemplatesByIds", arguments=cleared_arguments - ) + ) -> 
"GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": + pass @classmethod def power_factory_models_by_ids( cls, model_ids: list[str] - ) -> PowerFactoryModelFields: - """Retrieve a list of powerFactoryModels by IDs""" - arguments: dict[str, dict[str, Any]] = { - "modelIds": {"type": "[ID!]!", "value": model_ids} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return PowerFactoryModelFields( - field_name="powerFactoryModelsByIds", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": + pass @classmethod - def get_active_work_packages(cls) -> GraphQLField: - """Retrieve a list of currently active (running, scheduled, pending) work packages""" - return GraphQLField(field_name="getActiveWorkPackages") + def get_active_work_packages(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass @classmethod - def get_all_work_packages_authors(cls) -> GqlUserFields: - """Retrieve all users that have created work packages.""" - return GqlUserFields(field_name="getAllWorkPackagesAuthors") + def get_all_work_packages_authors( + cls, + ) -> "GraphQLQuery[GqlUserFields, GqlUserGraphQLField]": + pass @classmethod - def get_calibration_run(cls, id: str) -> HcCalibrationFields: - """Retrieve calibration run details by ID""" - arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return HcCalibrationFields( - field_name="getCalibrationRun", arguments=cleared_arguments - ) + def get_calibration_run( + cls, id: str + ) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": + pass @classmethod - def get_calibration_sets(cls) -> GraphQLField: - """Retrieve available distribution transformer tap calibration sets.""" - return GraphQLField(field_name="getCalibrationSets") + def 
get_calibration_sets(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass @classmethod def get_duration_curves( @@ -283,74 +188,32 @@ def get_duration_curves( feeder: str, year: int, conducting_equipment_mrid: str, - ) -> DurationCurveByTerminalFields: - """Retrieve duration curves for a single piece of equipment in a specific SYF.""" - arguments: dict[str, dict[str, Any]] = { - "workPackageId": {"type": "String!", "value": work_package_id}, - "scenario": {"type": "String!", "value": scenario}, - "feeder": {"type": "String!", "value": feeder}, - "year": {"type": "Int!", "value": year}, - "conductingEquipmentMrid": { - "type": "String!", - "value": conducting_equipment_mrid, - }, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return DurationCurveByTerminalFields( - field_name="getDurationCurves", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[DurationCurveByTerminalFields, DurationCurveByTerminalGraphQLField]": + pass @classmethod def get_opportunities( cls, *, year: Optional[int] = None - ) -> OpportunitiesByYearFields: - """Retrieve all Opportunities available for a specific year.""" - arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return OpportunitiesByYearFields( - field_name="getOpportunities", arguments=cleared_arguments - ) - - @classmethod - def get_opportunities_for_equipment(cls, m_rid: str) -> OpportunityFields: - """Retrieve Opportunities by attached conducting equipment mRID.""" - arguments: dict[str, dict[str, Any]] = { - "mRID": {"type": "String!", "value": m_rid} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return OpportunityFields( - field_name="getOpportunitiesForEquipment", arguments=cleared_arguments - ) - - @classmethod - def get_opportunity(cls, id: str) -> 
OpportunityFields: - """Retrieve Opportunities by id.""" - arguments: dict[str, dict[str, Any]] = {"id": {"type": "String!", "value": id}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return OpportunityFields( - field_name="getOpportunity", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[OpportunitiesByYearFields, OpportunitiesByYearGraphQLField]": + pass + + @classmethod + def get_opportunities_for_equipment( + cls, m_rid: str + ) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": + pass + + @classmethod + def get_opportunity( + cls, id: str + ) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": + pass @classmethod def get_opportunity_locations( cls, *, year: Optional[int] = None - ) -> OpportunityLocationFields: - """Retrieve all opportunity locations available for a specific year.""" - arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return OpportunityLocationFields( - field_name="getOpportunityLocations", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[OpportunityLocationFields, OpportunityLocationGraphQLField]": + pass @classmethod def get_scenario_configurations( @@ -358,20 +221,9 @@ def get_scenario_configurations( *, limit: Optional[int] = None, offset: Optional[Any] = None, - filter_: Optional[HcScenarioConfigsFilterInput] = None - ) -> HcScenarioConfigsPageFields: - """Retrieve a page scenario configurations from the hosting capacity input database.""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "HcScenarioConfigsFilterInput", "value": filter_}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return HcScenarioConfigsPageFields( - 
field_name="getScenarioConfigurations", arguments=cleared_arguments - ) + filter_: Optional[HcScenarioConfigsFilterInput] = None, + ) -> "GraphQLQuery[HcScenarioConfigsPageFields, HcScenarioConfigsPageGraphQLField]": + pass @classmethod def get_transformer_tap_settings( @@ -379,60 +231,27 @@ def get_transformer_tap_settings( calibration_name: str, *, feeder: Optional[str] = None, - transformer_mrid: Optional[str] = None - ) -> GqlTxTapRecordFields: - """Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database.""" - arguments: dict[str, dict[str, Any]] = { - "calibrationName": {"type": "String!", "value": calibration_name}, - "feeder": {"type": "String", "value": feeder}, - "transformerMrid": {"type": "String", "value": transformer_mrid}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GqlTxTapRecordFields( - field_name="getTransformerTapSettings", arguments=cleared_arguments - ) + transformer_mrid: Optional[str] = None, + ) -> "GraphQLQuery[GqlTxTapRecordFields, GqlTxTapRecordGraphQLField]": + pass @classmethod def get_work_package_by_id( cls, id: str, *, with_groupings: Optional[bool] = None - ) -> HcWorkPackageFields: - """Retrieve a hosting capacity work package by ID, withGroupings: Whether to include model groupings in the work package progress details, default value is false""" - arguments: dict[str, dict[str, Any]] = { - "id": {"type": "ID!", "value": id}, - "withGroupings": {"type": "Boolean", "value": with_groupings}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return HcWorkPackageFields( - field_name="getWorkPackageById", arguments=cleared_arguments - ) - - @classmethod - def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> GraphQLField: - """Returns an estimated cost of the submitted hosting capacity work package.""" - arguments: dict[str, 
dict[str, Any]] = { - "input": {"type": "WorkPackageInput!", "value": input} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GraphQLField( - field_name="getWorkPackageCostEstimation", arguments=cleared_arguments - ) - - @classmethod - def get_work_package_tree(cls, id: str) -> WorkPackageTreeFields: - """Retrieve a work package tree with its ancestors and immediate children.""" - arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return WorkPackageTreeFields( - field_name="getWorkPackageTree", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[HcWorkPackageFields, HcWorkPackageGraphQLField]": + pass + + @classmethod + def get_work_package_cost_estimation( + cls, input: WorkPackageInput + ) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_work_package_tree( + cls, id: str + ) -> "GraphQLQuery[WorkPackageTreeFields, WorkPackageTreeGraphQLField]": + pass @classmethod def get_work_packages( @@ -442,38 +261,15 @@ def get_work_packages( offset: Optional[Any] = None, filter_: Optional[HcWorkPackagesFilterInput] = None, sort: Optional[HcWorkPackagesSortCriteriaInput] = None, - with_groupings: Optional[bool] = None - ) -> HcWorkPackagePageFields: - """Retrieve a page of hosting capacity work packages, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "HcWorkPackagesFilterInput", "value": filter_}, - "sort": {"type": "HcWorkPackagesSortCriteriaInput", "value": sort}, - "withGroupings": {"type": "Boolean", "value": with_groupings}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return HcWorkPackagePageFields( - 
field_name="getWorkPackages", arguments=cleared_arguments - ) + with_groupings: Optional[bool] = None, + ) -> "GraphQLQuery[HcWorkPackagePageFields, HcWorkPackagePageGraphQLField]": + pass @classmethod def hosting_capacity_file_upload_url( cls, filename: str, file_type: HostingCapacityFileType - ) -> UploadUrlResponseFields: - """Generate a pre-signed URL to upload hosting capacity file to the storage location. Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" - arguments: dict[str, dict[str, Any]] = { - "filename": {"type": "String!", "value": filename}, - "fileType": {"type": "HostingCapacityFileType!", "value": file_type}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return UploadUrlResponseFields( - field_name="hostingCapacityFileUploadUrl", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass @classmethod def list_calibration_runs( @@ -481,33 +277,15 @@ def list_calibration_runs( *, name: Optional[str] = None, calibration_time: Optional[Any] = None, - status: Optional[WorkflowStatus] = None - ) -> HcCalibrationFields: - """Retrieve all calibration runs initiated through EAS""" - arguments: dict[str, dict[str, Any]] = { - "name": {"type": "String", "value": name}, - "calibrationTime": {"type": "LocalDateTime", "value": calibration_time}, - "status": {"type": "WorkflowStatus", "value": status}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return HcCalibrationFields( - field_name="listCalibrationRuns", arguments=cleared_arguments - ) - - @classmethod - def get_processed_diff(cls, diff_id: str) -> ProcessedDiffFields: - """Retrieve processed diff of hosting capacity work packages with diffId""" - arguments: dict[str, dict[str, Any]] = { - "diffId": {"type": "ID!", "value": diff_id} - } - cleared_arguments = { - key: value 
for key, value in arguments.items() if value["value"] is not None - } - return ProcessedDiffFields( - field_name="getProcessedDiff", arguments=cleared_arguments - ) + status: Optional[WorkflowStatus] = None, + ) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": + pass + + @classmethod + def get_processed_diff( + cls, diff_id: str + ) -> "GraphQLQuery[ProcessedDiffFields, ProcessedDiffGraphQLField]": + pass @classmethod def get_processed_diffs( @@ -516,70 +294,39 @@ def get_processed_diffs( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[ProcessedDiffFilterInput] = None, - sort: Optional[ProcessedDiffSortCriteriaInput] = None - ) -> ProcessedDiffPageFields: - """Retrieve a page of processed diffs, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "ProcessedDiffFilterInput", "value": filter_}, - "sort": {"type": "ProcessedDiffSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return ProcessedDiffPageFields( - field_name="getProcessedDiffs", arguments=cleared_arguments - ) - - @classmethod - def get_all_jobs(cls) -> IngestionJobFields: - """Gets the ID and metadata of all ingestion jobs in reverse chronological order.""" - return IngestionJobFields(field_name="getAllJobs") - - @classmethod - def get_distinct_metric_names(cls, job_id: str) -> GraphQLField: - """Gets all possible values of metricName for a specific job.""" - arguments: dict[str, dict[str, Any]] = { - "jobId": {"type": "String!", "value": job_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GraphQLField( - field_name="getDistinctMetricNames", arguments=cleared_arguments - ) + sort: Optional[ProcessedDiffSortCriteriaInput] = None, 
+ ) -> "GraphQLQuery[ProcessedDiffPageFields, ProcessedDiffPageGraphQLField]": + pass + + @classmethod + def get_all_jobs( + cls, + ) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": + pass + + @classmethod + def get_distinct_metric_names( + cls, job_id: str + ) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass @classmethod def get_metrics( cls, job_id: str, container_type: ContainerType, container_id: str - ) -> MetricFields: - """Gets the metrics for a network container emitted in an ingestion job.""" - arguments: dict[str, dict[str, Any]] = { - "jobId": {"type": "String!", "value": job_id}, - "containerType": {"type": "ContainerType!", "value": container_type}, - "containerId": {"type": "String!", "value": container_id}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return MetricFields(field_name="getMetrics", arguments=cleared_arguments) - - @classmethod - def get_newest_job(cls) -> IngestionJobFields: - """Gets the ID and metadata of the newest ingestion job. 
If no job exists, this returns null.""" - return IngestionJobFields(field_name="getNewestJob") - - @classmethod - def get_sources(cls, job_id: str) -> JobSourceFields: - """Gets the data sources used in an ingestion job.""" - arguments: dict[str, dict[str, Any]] = { - "jobId": {"type": "String!", "value": job_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return JobSourceFields(field_name="getSources", arguments=cleared_arguments) + ) -> "GraphQLQuery[MetricFields, MetricGraphQLField]": + pass + + @classmethod + def get_newest_job( + cls, + ) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": + pass + + @classmethod + def get_sources( + cls, job_id: str + ) -> "GraphQLQuery[JobSourceFields, JobSourceGraphQLField]": + pass @classmethod def paged_sincal_model_presets( @@ -588,21 +335,9 @@ def paged_sincal_model_presets( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetSincalModelPresetsFilterInput] = None, - sort: Optional[GetSincalModelPresetsSortCriteriaInput] = None - ) -> SincalModelPresetPageFields: - """Retrieve a page of sincalModel presets, with optional limit and offset, and optional filtering. 
A default preset with null ID will also be included in the response, which may result in the number of presets returned exceeding the desired page size (limit).""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "GetSincalModelPresetsFilterInput", "value": filter_}, - "sort": {"type": "GetSincalModelPresetsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return SincalModelPresetPageFields( - field_name="pagedSincalModelPresets", arguments=cleared_arguments - ) + sort: Optional[GetSincalModelPresetsSortCriteriaInput] = None, + ) -> "GraphQLQuery[SincalModelPresetPageFields, SincalModelPresetPageGraphQLField]": + pass @classmethod def paged_sincal_models( @@ -611,188 +346,98 @@ def paged_sincal_models( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetSincalModelsFilterInput] = None, - sort: Optional[GetSincalModelsSortCriteriaInput] = None - ) -> SincalModelPageFields: - """Retrieve a page of sincalModels, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "GetSincalModelsFilterInput", "value": filter_}, - "sort": {"type": "GetSincalModelsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return SincalModelPageFields( - field_name="pagedSincalModels", arguments=cleared_arguments - ) - - @classmethod - def sincal_model_by_id(cls, model_id: str) -> SincalModelFields: - """Retrieve a sincalModel by ID""" - arguments: dict[str, dict[str, Any]] = { - "modelId": {"type": "ID!", "value": model_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if 
value["value"] is not None - } - return SincalModelFields( - field_name="sincalModelById", arguments=cleared_arguments - ) + sort: Optional[GetSincalModelsSortCriteriaInput] = None, + ) -> "GraphQLQuery[SincalModelPageFields, SincalModelPageGraphQLField]": + pass + + @classmethod + def sincal_model_by_id( + cls, model_id: str + ) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": + pass @classmethod def sincal_model_config_upload_url( cls, filename: str, file_type: SincalFileType - ) -> UploadUrlResponseFields: - """Generate a pre-signed URL to upload a sincal configuration file to the input storage location. Returns the pre-signed URL along with the final file path as it will be referenced by EAS. This does not update the sincal configuration. To make use of a newly uploaded configuration file, pass the `filePath` returned by this query to the `updateSincalModelConfigFilePath()` mutation.""" - arguments: dict[str, dict[str, Any]] = { - "filename": {"type": "String!", "value": filename}, - "fileType": {"type": "SincalFileType!", "value": file_type}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return UploadUrlResponseFields( - field_name="sincalModelConfigUploadUrl", arguments=cleared_arguments - ) - - @classmethod - def sincal_model_global_config(cls) -> SincalGlobalInputsConfigFields: - """Retrieve the current sincalModel input file paths.""" - return SincalGlobalInputsConfigFields(field_name="sincalModelGlobalConfig") - - @classmethod - def sincal_model_preset_by_id(cls, preset_id: str) -> SincalModelPresetFields: - """Retrieve a sincalModel preset by ID""" - arguments: dict[str, dict[str, Any]] = { - "presetId": {"type": "ID!", "value": preset_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return SincalModelPresetFields( - field_name="sincalModelPresetById", arguments=cleared_arguments - ) + ) -> 
"GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass + + @classmethod + def sincal_model_global_config( + cls, + ) -> "GraphQLQuery[SincalGlobalInputsConfigFields, SincalGlobalInputsConfigGraphQLField]": + pass + + @classmethod + def sincal_model_preset_by_id( + cls, preset_id: str + ) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": + pass @classmethod def sincal_model_presets_by_ids( cls, preset_ids: list[str] - ) -> SincalModelPresetFields: - """Retrieve a list of sincalModel presets by IDs""" - arguments: dict[str, dict[str, Any]] = { - "presetIds": {"type": "[ID!]!", "value": preset_ids} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return SincalModelPresetFields( - field_name="sincalModelPresetsByIds", arguments=cleared_arguments - ) - - @classmethod - def sincal_models_by_ids(cls, model_ids: list[str]) -> SincalModelFields: - """Retrieve a list of sincalModels by IDs""" - arguments: dict[str, dict[str, Any]] = { - "modelIds": {"type": "[ID!]!", "value": model_ids} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return SincalModelFields( - field_name="sincalModelsByIds", arguments=cleared_arguments - ) - - @classmethod - def create_machine_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: - """Create a new JWT auth token for a machine with the specified roles.""" - arguments: dict[str, dict[str, Any]] = { - "roles": {"type": "[String!]!", "value": roles}, - "tokenName": {"type": "String!", "value": token_name}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GraphQLField( - field_name="createMachineApiKey", arguments=cleared_arguments - ) - - @classmethod - def create_user_api_key(cls, roles: list[str], token_name: str) -> GraphQLField: - """Create the JWT auth token for the current user with 
specified roles.""" - arguments: dict[str, dict[str, Any]] = { - "roles": {"type": "[String!]!", "value": roles}, - "tokenName": {"type": "String!", "value": token_name}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return GraphQLField(field_name="createUserApiKey", arguments=cleared_arguments) - - @classmethod - def get_machine_tokens(cls) -> MachineUserFields: - """Gets all machine token users with their username and display name.""" - return MachineUserFields(field_name="getMachineTokens") - - @classmethod - def get_public_geo_view_config(cls) -> GraphQLField: - """Retrieve the GeoViewConfig used to config the EWB public map tile endpoint. Returns NUll if not enabled.""" - return GraphQLField(field_name="getPublicGeoViewConfig") - - @classmethod - def get_all_external_roles(cls) -> GraphQLField: - """Get all external roles from EAS.""" - return GraphQLField(field_name="getAllExternalRoles") - - @classmethod - def get_network_models(cls) -> NetworkModelsFields: - """Get all EWB network models""" - return NetworkModelsFields(field_name="getNetworkModels") + ) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": + pass + + @classmethod + def sincal_models_by_ids( + cls, model_ids: list[str] + ) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": + pass + + @classmethod + def create_machine_api_key( + cls, roles: list[str], token_name: str + ) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def create_user_api_key( + cls, roles: list[str], token_name: str + ) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_machine_tokens( + cls, + ) -> "GraphQLQuery[MachineUserFields, MachineUserGraphQLField]": + pass + + @classmethod + def get_public_geo_view_config(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + pass + + @classmethod + def get_all_external_roles(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": + 
pass + + @classmethod + def get_network_models( + cls, + ) -> "GraphQLQuery[NetworkModelsFields, NetworkModelsGraphQLField]": + pass @classmethod def get_feeder_load_analysis_report_status( cls, report_id: str, full_spec: bool - ) -> FeederLoadAnalysisReportFields: - """Retrieve the status of a feeder load analysis job.""" - arguments: dict[str, dict[str, Any]] = { - "reportId": {"type": "ID!", "value": report_id}, - "fullSpec": {"type": "Boolean!", "value": full_spec}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return FeederLoadAnalysisReportFields( - field_name="getFeederLoadAnalysisReportStatus", arguments=cleared_arguments - ) - - @classmethod - def get_ingestor_run(cls, id: int) -> IngestionRunFields: - """Retrieve ingestor run details by ID""" - arguments: dict[str, dict[str, Any]] = {"id": {"type": "Int!", "value": id}} - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return IngestionRunFields( - field_name="getIngestorRun", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[FeederLoadAnalysisReportFields, FeederLoadAnalysisReportGraphQLField]": + pass + + @classmethod + def get_ingestor_run( + cls, id: int + ) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": + pass @classmethod def list_ingestor_runs( cls, *, filter_: Optional[IngestorRunsFilterInput] = None, - sort: Optional[IngestorRunsSortCriteriaInput] = None - ) -> IngestionRunFields: - """Retrieve all ingestor runs initiated through EAS""" - arguments: dict[str, dict[str, Any]] = { - "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, - "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return IngestionRunFields( - field_name="listIngestorRuns", arguments=cleared_arguments - ) + sort: 
Optional[IngestorRunsSortCriteriaInput] = None, + ) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": + pass @classmethod def list_ingestor_runs_paged( @@ -801,21 +446,9 @@ def list_ingestor_runs_paged( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[IngestorRunsFilterInput] = None, - sort: Optional[IngestorRunsSortCriteriaInput] = None - ) -> IngestorRunPageFields: - """Retrieve all ingestor runs initiated through EAS""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, - "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return IngestorRunPageFields( - field_name="listIngestorRunsPaged", arguments=cleared_arguments - ) + sort: Optional[IngestorRunsSortCriteriaInput] = None, + ) -> "GraphQLQuery[IngestorRunPageFields, IngestorRunPageGraphQLField]": + pass @classmethod def paged_open_dss_models( @@ -824,98 +457,48 @@ def paged_open_dss_models( limit: Optional[int] = None, offset: Optional[Any] = None, filter_: Optional[GetOpenDssModelsFilterInput] = None, - sort: Optional[GetOpenDssModelsSortCriteriaInput] = None - ) -> OpenDssModelPageFields: - """Retrieve a page of OpenDSS models, with optional limit and offset, and optional filtering""" - arguments: dict[str, dict[str, Any]] = { - "limit": {"type": "Int", "value": limit}, - "offset": {"type": "Long", "value": offset}, - "filter": {"type": "GetOpenDssModelsFilterInput", "value": filter_}, - "sort": {"type": "GetOpenDssModelsSortCriteriaInput", "value": sort}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return OpenDssModelPageFields( - field_name="pagedOpenDssModels", arguments=cleared_arguments - ) + sort: 
Optional[GetOpenDssModelsSortCriteriaInput] = None, + ) -> "GraphQLQuery[OpenDssModelPageFields, OpenDssModelPageGraphQLField]": + pass @classmethod def get_user_permitted_customer_list_column_config( cls, - ) -> UserCustomerListColumnConfigFields: - """Fetches the user permitted column configuration for the customer list view.""" - return UserCustomerListColumnConfigFields( - field_name="getUserPermittedCustomerListColumnConfig" - ) + ) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": + pass @classmethod def get_user_saved_customer_list_column_config( cls, - ) -> UserCustomerListColumnConfigFields: - """Fetches the user's column configuration for the customer list view.""" - return UserCustomerListColumnConfigFields( - field_name="getUserSavedCustomerListColumnConfig" - ) - - @classmethod - def get_customer_list(cls, m_ri_ds: list[str]) -> CustomerDetailsResponseFields: - """Retrieve the list of customers and their details.""" - arguments: dict[str, dict[str, Any]] = { - "mRIDs": {"type": "[String!]!", "value": m_ri_ds} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return CustomerDetailsResponseFields( - field_name="getCustomerList", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": + pass + + @classmethod + def get_customer_list( + cls, m_ri_ds: list[str] + ) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": + pass @classmethod def get_customer_list_by_nmis( cls, nmis: list[str] - ) -> CustomerDetailsResponseFields: - """Retrieve customer details using NMIs as input.""" - arguments: dict[str, dict[str, Any]] = { - "nmis": {"type": "[String!]!", "value": nmis} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return CustomerDetailsResponseFields( - 
field_name="getCustomerListByNmis", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": + pass @classmethod - def get_app_options(cls) -> AppOptionsFields: - """Get App Options""" - return AppOptionsFields(field_name="getAppOptions") + def get_app_options( + cls, + ) -> "GraphQLQuery[AppOptionsFields, AppOptionsGraphQLField]": + pass @classmethod def get_presigned_upload_url_for_variant( cls, filename: str, file_type: VariantFileType - ) -> UploadUrlResponseFields: - """Generate a pre-signed URL to upload variant files to the cloud storage. Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" - arguments: dict[str, dict[str, Any]] = { - "filename": {"type": "String!", "value": filename}, - "fileType": {"type": "VariantFileType!", "value": file_type}, - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return UploadUrlResponseFields( - field_name="getPresignedUploadUrlForVariant", arguments=cleared_arguments - ) - - @classmethod - def get_variant_upload_info(cls, job_id: str) -> VariantWorkPackageFields: - """Retrieves status of a variant ingestion workflow""" - arguments: dict[str, dict[str, Any]] = { - "jobID": {"type": "String!", "value": job_id} - } - cleared_arguments = { - key: value for key, value in arguments.items() if value["value"] is not None - } - return VariantWorkPackageFields( - field_name="getVariantUploadInfo", arguments=cleared_arguments - ) + ) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": + pass + + @classmethod + def get_variant_upload_info( + cls, job_id: str + ) -> "GraphQLQuery[VariantWorkPackageFields, VariantWorkPackageGraphQLField]": + pass diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py index 2c55e85..3cd62f6 100644 --- 
a/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_typing_fields.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from .base_operation import GraphQLField diff --git a/src/zepben/eas/lib/generated_graphql_client/enums.py b/src/zepben/eas/lib/generated_graphql_client/enums.py index f5ef70b..5a153a4 100644 --- a/src/zepben/eas/lib/generated_graphql_client/enums.py +++ b/src/zepben/eas/lib/generated_graphql_client/enums.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from enum import Enum diff --git a/src/zepben/eas/lib/generated_graphql_client/exceptions.py b/src/zepben/eas/lib/generated_graphql_client/exceptions.py index 2543235..e217e9b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/exceptions.py +++ b/src/zepben/eas/lib/generated_graphql_client/exceptions.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- from typing import Any, Optional, Union import httpx diff --git a/src/zepben/eas/lib/generated_graphql_client/input_types.py b/src/zepben/eas/lib/generated_graphql_client/input_types.py index dfe636c..be71ecd 100644 --- a/src/zepben/eas/lib/generated_graphql_client/input_types.py +++ b/src/zepben/eas/lib/generated_graphql_client/input_types.py @@ -1,9 +1,3 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - from typing import Any, Optional from pydantic import Field diff --git a/test/test_client_generation.py b/test/test_client_generation.py new file mode 100644 index 0000000..02d67c4 --- /dev/null +++ b/test/test_client_generation.py @@ -0,0 +1,47 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +import ast +import pytest + +@pytest.mark.asyncio +async def test_do_things(): + from zepben.eas.lib.generated_graphql_client import custom_queries + with open(custom_queries.__file__) as f: + orig_ast = ast.parse( + f.read(), + ) + n = None + extra_imports = [] + + for i, b in enumerate(orig_ast.body): + if isinstance(b, ast.ClassDef): + if n is None: + n = i + for func in b.body: + if isinstance(func, ast.FunctionDef): + func.body = [ast.Pass()] + extra_imports.append(func.returns.id.replace("Fields", "GraphQLField")) + func.returns = ast.Name(f'\"GraphQLQuery[{func.returns.id}, {func.returns.id.replace("Fields", "GraphQLField")}]\"') + + orig_ast.body.insert(n, ast.parse( + """ +class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): +def fields(self, *fields: TGraphQLField): ... 
+ """ + ).body[0]) + orig_ast.body.insert(n, ast.parse( + 'TGraphQLField = TypeVar("TGraphQLField")' + ).body[0]) + orig_ast.body.insert(n, ast.parse( + 'TGraphQLQueryField = TypeVar("TGraphQLQueryField")' + ).body[0]) + + orig_ast.body.insert(n, ast.parse("from typing import Generic, TypeVar").body[0]) + orig_ast.body.insert(n, ast.parse(f"from zepben.eas import {', '.join(extra_imports)}").body[0]) + + with open(custom_queries.__file__ + 'i', 'w') as f: + f.write(ast.unparse(orig_ast)) + diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index f20fbf8..a8d4611 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -3,9 +3,8 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -import ast import asyncio -from typing import TypeVar, TYPE_CHECKING +from typing import TYPE_CHECKING import pytest @@ -64,43 +63,6 @@ async def test_can_connect_to_local_eas_async_calling_func(): @pytest.mark.asyncio async def test_do_things(): - from zepben.eas.lib.generated_graphql_client import custom_queries - with open(custom_queries.__file__) as f: - orig_ast = ast.parse( - f.read(), - ) - n = None - extra_imports = [] - - for i, b in enumerate(orig_ast.body): - if isinstance(b, ast.ClassDef): - if n is None: - n = i - for func in b.body: - if isinstance(func, ast.FunctionDef): - func.body = [ast.Pass()] - extra_imports.append(func.returns.id.replace("Fields", "GraphQLField")) - func.returns = ast.Name(f'\"GraphQLQuery[{func.returns.id}, {func.returns.id.replace("Fields", "GraphQLField")}]\"') - - orig_ast.body.insert(n, ast.parse( - """ -class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): - def fields(self, *fields: TGraphQLField): ... 
- """ - ).body[0]) - orig_ast.body.insert(n, ast.parse( - 'TGraphQLField = TypeVar("TGraphQLField")' - ).body[0]) - orig_ast.body.insert(n, ast.parse( - 'TGraphQLQueryField = TypeVar("TGraphQLQueryField")' - ).body[0]) - - orig_ast.body.insert(n, ast.parse("from typing import Generic, TypeVar").body[0]) - orig_ast.body.insert(n, ast.parse(f"from zepben.eas import {', '.join(extra_imports)}").body[0]) - - with open(custom_queries.__file__ + 'i', 'w') as f: - f.write(ast.unparse(orig_ast)) - client = EasClient( host="localhost", port=7654, From ab2a2e3c99cac656f224e517a7541c689b11b3d3 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 13:30:08 +1100 Subject: [PATCH 28/32] it works Signed-off-by: Max Chesterfield --- pyproject.toml | 2 +- src/zepben/eas/client/decorators.py | 10 +- src/zepben/eas/client/eas_client.py | 24 +- src/zepben/eas/generate_client.py | 72 +- .../lib/ariadne_plugins/base_operation.pyi | 23 + .../custom_query_type_hinter.py | 34 +- src/zepben/eas/lib/ariadne_plugins/types.py | 12 + .../custom_queries.py | 643 ++++++++++++++++-- .../custom_queries.pyi | 264 ------- .../custom_queries.pyi.bak | 24 - test/test_client_generation.py | 1 + 11 files changed, 698 insertions(+), 411 deletions(-) create mode 100644 src/zepben/eas/lib/ariadne_plugins/base_operation.pyi create mode 100644 src/zepben/eas/lib/ariadne_plugins/types.py delete mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi delete mode 100644 src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak diff --git a/pyproject.toml b/pyproject.toml index 26d6aa2..dd478bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,7 +50,7 @@ test = [ "trustme==0.9.0" ] eas-codegen = [ - "ariadne-codegen==0.18.0" + "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. 
] [tool.setuptools.packages.find] diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index fb0c321..8809272 100644 --- a/src/zepben/eas/client/decorators.py +++ b/src/zepben/eas/client/decorators.py @@ -9,7 +9,7 @@ import asyncio import functools import warnings -from typing import Callable +from typing import Callable, ParamSpec, TypeVar, cast def catch_warnings(func: Callable) -> Callable: @@ -23,7 +23,13 @@ def wrapper(*args, **kwargs): return wrapper -def async_func(func: Callable) -> Callable: +# Type hinting async_func will break the type hinting. +# +# P = ParamSpec("P") +# R = TypeVar("R") +# def async_func(func: Callable[P, R)) -> Callable[P, R]: +def async_func(func): + @functools.wraps(func) def wrapper(self, *args, **kwargs): if self._asynchronous: return func(self, *args, **kwargs) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 8438c90..5f8505b 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -136,9 +136,11 @@ def __init__( async def close(self): await self.http_client.aclose() - async def do_query(self, query: GraphQLQuery[T, R], field: R, *additional_fields: R, operation_name: str = None) -> T: + @async_func + async def do_query(self, query: GraphQLQuery[T, R], *additional_fields: R, operation_name: str = None) -> T: """Execute a query against the Evolve App Server.""" - return await super().query(query.fields(field, *additional_fields), operation_name=operation_name) + query = query.fields(*additional_fields) if hasattr(query, "fields") else query + return await super().query(query, operation_name=operation_name) @async_func async def mutation(self, *fields: GraphQLField, operation_name: str = None) -> dict[str, Any]: @@ -224,7 +226,7 @@ def get_work_package_cost_estimation(self, work_package: WorkPackageInput): :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration 
for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return self.query( + return self.do_query( Query.get_work_package_cost_estimation(work_package), ) @@ -266,7 +268,7 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: why is this inf :return: The HTTP response received from the Evolve App Server after requesting work packages progress info """ - return self.query( + return self.do_query( Query.get_active_work_packages(), ) @@ -296,7 +298,7 @@ def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool :param full_spec: If true the response will include the request sent to generate the report :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ - return self.query( + return self.do_query( Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( *FeederLoadAnalysisReportFields.all_fields() ), @@ -339,7 +341,7 @@ def get_ingestor_run(self, ingestor_run_id: int): :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ - return self.query( + return self.do_query( Query.get_ingestor_run(ingestor_run_id).fields( *IngestionRunFields.all_fields() ), @@ -361,7 +363,7 @@ def get_ingestor_run_list( :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. 
""" - return self.query( + return self.do_query( Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( IngestionRunFields.id, IngestionRunFields.container_runtime_type, @@ -425,7 +427,7 @@ def get_hosting_capacity_calibration_run(self, id: str): :param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return self.query( + return self.do_query( Query.get_calibration_run(id).fields( HcCalibrationFields.id, HcCalibrationFields.name, @@ -449,7 +451,7 @@ def get_hosting_capacity_calibration_sets(self): :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return self.query( + return self.do_query( Query.get_calibration_sets(), ) @@ -470,7 +472,7 @@ def get_transformer_tap_settings( :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - return self.query( + return self.do_query( Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, @@ -518,7 +520,7 @@ def get_paged_opendss_models( :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - return self.query( + return self.do_query( Query.paged_open_dss_models( limit=limit, offset=offset, diff --git a/src/zepben/eas/generate_client.py b/src/zepben/eas/generate_client.py index d140536..78044f2 100644 --- a/src/zepben/eas/generate_client.py +++ b/src/zepben/eas/generate_client.py @@ -5,7 +5,8 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
import ast import sys -from typing import Optional +from _ast import ImportFrom +from typing import Optional, Callable, Any import click import ariadne_codegen.client_generators.custom_operation @@ -13,36 +14,41 @@ from graphql import assert_valid_schema -class ZBPatchedPluginManager(PluginManager): - def generate_custom_module(self, module: ast.Module) -> ast.Module: - return self._apply_plugins_on_object("generate_custom_module", module) - - def generate_custom_method(self, module: ast.FunctionDef) -> ast.FunctionDef: - return self._apply_plugins_on_object("generate_custom_method", module) - - -class ZBPatchedCustomOperationGenerator(ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator): - plugin_manager: ZBPatchedPluginManager - - def _generate_method( - self, operation_name: str, operation_args, final_type, description: Optional[str] = None - ) -> ast.FunctionDef: - return self.plugin_manager.generate_custom_method( - super()._generate_method( - operation_name, - operation_args, - final_type, - description, - ) - ) - - def generate(self) -> ast.Module: - return self.plugin_manager.generate_custom_module( - super().generate( - ) - ) - -ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator = ZBPatchedCustomOperationGenerator +# class ZBPatchedPluginManager(PluginManager): +# def generate_custom_module(self, module: ast.Module, import_adder: Callable[[ImportFrom], Any]) -> ast.Module: +# return self._apply_plugins_on_object( +# "generate_custom_module", +# module, import_adder +# ) +# +# def generate_custom_method(self, method_def: ast.FunctionDef, import_adder: Callable[[ImportFrom], Any]) -> ast.FunctionDef: +# return self._apply_plugins_on_object( +# "generate_custom_method", +# method_def, import_adder +# ) +# +# +# class ZBPatchedCustomOperationGenerator(ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator): +# plugin_manager: ZBPatchedPluginManager +# +# def _generate_method( +# self, 
operation_name: str, operation_args, final_type, description: Optional[str] = None +# ) -> ast.FunctionDef: +# return self.plugin_manager.generate_custom_method( +# super()._generate_method( +# operation_name, +# operation_args, +# final_type, +# description, +# ), self._add_import +# ) +# +# def generate(self) -> ast.Module: +# return self.plugin_manager.generate_custom_module( +# super().generate(), self._add_import +# ) +# +# ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator = ZBPatchedCustomOperationGenerator from ariadne_codegen.client_generators.package import get_package_generator from ariadne_codegen.config import get_client_settings, get_config_dict, get_graphql_schema_settings @@ -94,7 +100,7 @@ def client(config_dict): introspection_settings=settings.introspection_settings, ) - plugin_manager = ZBPatchedPluginManager( + plugin_manager = PluginManager( schema=schema, config_dict=config_dict, plugins_types=get_plugins_types(settings.plugins), @@ -143,7 +149,7 @@ def graphql_schema(config_dict): introspection_settings=settings.introspection_settings, ) ) - plugin_manager = ZBPatchedPluginManager( + plugin_manager = PluginManager( schema=schema, config_dict=config_dict, plugins_types=get_plugins_types(settings.plugins), diff --git a/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi b/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi new file mode 100644 index 0000000..7a3a603 --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi @@ -0,0 +1,23 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +from _typeshed import Incomplete +from types import MethodType +from typing import Generator + +def __getattr__(name) -> Incomplete: ... + +class GraphQLField: + def __getattr__(self, name: str) -> Incomplete: ... 
+ + @classmethod + def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: + """ + Returns a generator over all ``GraphQLField``s that a given class returns + + :param cls: class to check + :return: generator over all GraphQLField's in a given class + """ diff --git a/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py index 287c065..014eb68 100644 --- a/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py +++ b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py @@ -5,30 +5,37 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. import ast +from _ast import ImportFrom +from typing import Callable, Any + from ariadne_codegen.plugins.base import Plugin class CustomQueryTypeHinterPlugin(Plugin): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._current_class = None - self._all_imports = set() def generate_custom_method(self, method: ast.FunctionDef) -> ast.FunctionDef: return method - def generate_custom_module(self, module: ast.Module, **kwargs) -> ast.Module: - for b in module.body: - if isinstance((class_def := b), ast.ClassDef): - # 1. Target a specific class (e.g., the root 'Query' result) - if class_def.name == "Query": - for method in class_def.body: - method.body = [ast.Pass()] - method.returns = ast.Name( - f'\"GraphQLQuery[{method.returns.id}, {method.returns.id.replace("Fields", "GraphQLField")}]\"' - ) + def generate_custom_module(self, imports: list[ImportFrom], type_imports: list[ImportFrom], class_defs: list[ast.ClassDef]) -> tuple[list[ImportFrom], list[ImportFrom], list[ast.ClassDef]]: + # 1. 
Target a specific class (e.g., the root 'Query' result) + if class_defs[0].name == "Query": + for method in class_defs[0].body: + injected_type = method.returns.id.replace("Fields", "GraphQLField") + method.returns = ast.Name( + f'\"GraphQLQuery[{method.returns.id}, {injected_type}]\"' + ) + imports.extend( + [ + ImportFrom('.custom_typing_fields', [ast.alias(injected_type)], level=0), + ImportFrom('zepben.eas.lib.ariadne_plugins.types', [ + ast.alias('GraphQLQuery') + ], level=0) + ] + ) - print(next(b for b in module.body if isinstance(b, ast.ClassDef)).name) - return module + return imports, type_imports, class_defs def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: if (iname := import_.names[0].name) in ( @@ -42,3 +49,4 @@ def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: print(f"[ZBEX] Assuming class import {iname} is from module 'enums.py'") import_.module = 'enums' return import_ + diff --git a/src/zepben/eas/lib/ariadne_plugins/types.py b/src/zepben/eas/lib/ariadne_plugins/types.py new file mode 100644 index 0000000..6a46186 --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/types.py @@ -0,0 +1,12 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +from typing import Generic, TypeVar + +TGraphQLField = TypeVar("TGraphQLField") +TGraphQLQueryField = TypeVar("TGraphQLQueryField") + +class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): + def fields(self, *fields: TGraphQLField): ... 
diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index 67de0a4..4d69671 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,5 +1,7 @@ from typing import Any, Optional +from zepben.eas.lib.ariadne_plugins.types import GraphQLQuery + from .custom_fields import ( AppOptionsFields, CustomerDetailsResponseFields, @@ -42,7 +44,49 @@ VariantWorkPackageFields, WorkPackageTreeFields, ) -from .custom_typing_fields import GraphQLField +from .custom_typing_fields import ( + AppOptionsGraphQLField, + CustomerDetailsResponseGraphQLField, + DurationCurveByTerminalGraphQLField, + FeederLoadAnalysisReportGraphQLField, + GqlTxTapRecordGraphQLField, + GqlUserGraphQLField, + GqlUserResponseGraphQLField, + GraphQLField, + HcCalibrationGraphQLField, + HcScenarioConfigsPageGraphQLField, + HcWorkPackageGraphQLField, + HcWorkPackagePageGraphQLField, + IngestionJobGraphQLField, + IngestionRunGraphQLField, + IngestorRunPageGraphQLField, + JobSourceGraphQLField, + MachineUserGraphQLField, + MetricGraphQLField, + NetworkModelsGraphQLField, + OpenDssModelPageGraphQLField, + OpportunitiesByYearGraphQLField, + OpportunityGraphQLField, + OpportunityLocationGraphQLField, + PowerFactoryModelGraphQLField, + PowerFactoryModelPageGraphQLField, + PowerFactoryModelTemplateGraphQLField, + PowerFactoryModelTemplatePageGraphQLField, + ProcessedDiffGraphQLField, + ProcessedDiffPageGraphQLField, + SincalGlobalInputsConfigGraphQLField, + SincalModelGraphQLField, + SincalModelPageGraphQLField, + SincalModelPresetGraphQLField, + SincalModelPresetPageGraphQLField, + StudyGraphQLField, + StudyPageGraphQLField, + StudyResultGraphQLField, + UploadUrlResponseGraphQLField, + UserCustomerListColumnConfigGraphQLField, + VariantWorkPackageGraphQLField, + WorkPackageTreeGraphQLField, +) from .enums import ( ContainerType, 
HostingCapacityFileType, @@ -84,35 +128,68 @@ def paged_studies( filter_: Optional[GetStudiesFilterInput] = None, sort: Optional[GetStudiesSortCriteriaInput] = None, ) -> "GraphQLQuery[StudyPageFields, StudyPageGraphQLField]": - pass + """Retrieve a page of studies, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetStudiesFilterInput", "value": filter_}, + "sort": {"type": "GetStudiesSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyPageFields(field_name="pagedStudies", arguments=cleared_arguments) @classmethod def results_by_id( cls, ids: list[str] ) -> "GraphQLQuery[StudyResultFields, StudyResultGraphQLField]": - pass + """Retrieve a list of results by IDs""" + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyResultFields(field_name="resultsById", arguments=cleared_arguments) @classmethod def studies( cls, *, filter_: Optional[GetStudiesFilterInput] = None ) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": - pass + """Retrieve a list of studies, with optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "filter": {"type": "GetStudiesFilterInput", "value": filter_} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return StudyFields(field_name="studies", arguments=cleared_arguments) @classmethod def studies_by_id( cls, ids: list[str] ) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": - pass + """Retrieve a list of studies by IDs""" + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, 
value in arguments.items() if value["value"] is not None + } + return StudyFields(field_name="studiesById", arguments=cleared_arguments) @classmethod def styles_by_id(cls, ids: list[str]) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Retrieve a list of style layers by IDs""" + arguments: dict[str, dict[str, Any]] = {"ids": {"type": "[ID!]!", "value": ids}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="stylesById", arguments=cleared_arguments) @classmethod def current_user( cls, ) -> "GraphQLQuery[GqlUserResponseFields, GqlUserResponseGraphQLField]": - pass + """Get information about the current user""" + return GqlUserResponseFields(field_name="currentUser") @classmethod def paged_power_factory_model_templates( @@ -123,7 +200,25 @@ def paged_power_factory_model_templates( filter_: Optional[GetPowerFactoryModelTemplatesFilterInput] = None, sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput] = None, ) -> "GraphQLQuery[PowerFactoryModelTemplatePageFields, PowerFactoryModelTemplatePageGraphQLField]": - pass + """Retrieve a page of powerFactoryModel templates, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": { + "type": "GetPowerFactoryModelTemplatesFilterInput", + "value": filter_, + }, + "sort": { + "type": "GetPowerFactoryModelTemplatesSortCriteriaInput", + "value": sort, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplatePageFields( + field_name="pagedPowerFactoryModelTemplates", arguments=cleared_arguments + ) @classmethod def paged_power_factory_models( @@ -134,51 +229,109 @@ def paged_power_factory_models( filter_: Optional[GetPowerFactoryModelsFilterInput] = None, sort: 
Optional[GetPowerFactoryModelsSortCriteriaInput] = None, ) -> "GraphQLQuery[PowerFactoryModelPageFields, PowerFactoryModelPageGraphQLField]": - pass + """Retrieve a page of powerFactoryModels, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetPowerFactoryModelsFilterInput", "value": filter_}, + "sort": {"type": "GetPowerFactoryModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelPageFields( + field_name="pagedPowerFactoryModels", arguments=cleared_arguments + ) @classmethod def power_factory_model_by_id( cls, model_id: str ) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": - pass + """Retrieve a powerFactoryModel by ID""" + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelFields( + field_name="powerFactoryModelById", arguments=cleared_arguments + ) @classmethod def power_factory_model_template_by_id( cls, template_id: str ) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": - pass + """Retrieve a powerFactoryModel template by ID""" + arguments: dict[str, dict[str, Any]] = { + "templateId": {"type": "ID!", "value": template_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplateFields( + field_name="powerFactoryModelTemplateById", arguments=cleared_arguments + ) @classmethod def power_factory_model_templates_by_ids( cls, template_ids: list[str] ) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": - pass + 
"""Retrieve a list of powerFactoryModel templates by IDs""" + arguments: dict[str, dict[str, Any]] = { + "templateIds": {"type": "[ID!]!", "value": template_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelTemplateFields( + field_name="powerFactoryModelTemplatesByIds", arguments=cleared_arguments + ) @classmethod def power_factory_models_by_ids( cls, model_ids: list[str] ) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": - pass + """Retrieve a list of powerFactoryModels by IDs""" + arguments: dict[str, dict[str, Any]] = { + "modelIds": {"type": "[ID!]!", "value": model_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return PowerFactoryModelFields( + field_name="powerFactoryModelsByIds", arguments=cleared_arguments + ) @classmethod def get_active_work_packages(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Retrieve a list of currently active (running, scheduled, pending) work packages""" + return GraphQLField(field_name="getActiveWorkPackages") @classmethod def get_all_work_packages_authors( cls, ) -> "GraphQLQuery[GqlUserFields, GqlUserGraphQLField]": - pass + """Retrieve all users that have created work packages.""" + return GqlUserFields(field_name="getAllWorkPackagesAuthors") @classmethod def get_calibration_run( cls, id: str ) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": - pass + """Retrieve calibration run details by ID""" + arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcCalibrationFields( + field_name="getCalibrationRun", arguments=cleared_arguments + ) @classmethod def get_calibration_sets(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Retrieve available distribution 
transformer tap calibration sets.""" + return GraphQLField(field_name="getCalibrationSets") @classmethod def get_duration_curves( @@ -189,31 +342,77 @@ def get_duration_curves( year: int, conducting_equipment_mrid: str, ) -> "GraphQLQuery[DurationCurveByTerminalFields, DurationCurveByTerminalGraphQLField]": - pass + """Retrieve duration curves for a single piece of equipment in a specific SYF.""" + arguments: dict[str, dict[str, Any]] = { + "workPackageId": {"type": "String!", "value": work_package_id}, + "scenario": {"type": "String!", "value": scenario}, + "feeder": {"type": "String!", "value": feeder}, + "year": {"type": "Int!", "value": year}, + "conductingEquipmentMrid": { + "type": "String!", + "value": conducting_equipment_mrid, + }, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return DurationCurveByTerminalFields( + field_name="getDurationCurves", arguments=cleared_arguments + ) @classmethod def get_opportunities( cls, *, year: Optional[int] = None ) -> "GraphQLQuery[OpportunitiesByYearFields, OpportunitiesByYearGraphQLField]": - pass + """Retrieve all Opportunities available for a specific year.""" + arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunitiesByYearFields( + field_name="getOpportunities", arguments=cleared_arguments + ) @classmethod def get_opportunities_for_equipment( cls, m_rid: str ) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": - pass + """Retrieve Opportunities by attached conducting equipment mRID.""" + arguments: dict[str, dict[str, Any]] = { + "mRID": {"type": "String!", "value": m_rid} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityFields( + field_name="getOpportunitiesForEquipment", arguments=cleared_arguments + ) 
@classmethod def get_opportunity( cls, id: str ) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": - pass + """Retrieve Opportunities by id.""" + arguments: dict[str, dict[str, Any]] = {"id": {"type": "String!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityFields( + field_name="getOpportunity", arguments=cleared_arguments + ) @classmethod def get_opportunity_locations( cls, *, year: Optional[int] = None ) -> "GraphQLQuery[OpportunityLocationFields, OpportunityLocationGraphQLField]": - pass + """Retrieve all opportunity locations available for a specific year.""" + arguments: dict[str, dict[str, Any]] = {"year": {"type": "Int", "value": year}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpportunityLocationFields( + field_name="getOpportunityLocations", arguments=cleared_arguments + ) @classmethod def get_scenario_configurations( @@ -223,7 +422,18 @@ def get_scenario_configurations( offset: Optional[Any] = None, filter_: Optional[HcScenarioConfigsFilterInput] = None, ) -> "GraphQLQuery[HcScenarioConfigsPageFields, HcScenarioConfigsPageGraphQLField]": - pass + """Retrieve a page scenario configurations from the hosting capacity input database.""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "HcScenarioConfigsFilterInput", "value": filter_}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcScenarioConfigsPageFields( + field_name="getScenarioConfigurations", arguments=cleared_arguments + ) @classmethod def get_transformer_tap_settings( @@ -233,25 +443,62 @@ def get_transformer_tap_settings( feeder: Optional[str] = None, transformer_mrid: Optional[str] = None, ) -> "GraphQLQuery[GqlTxTapRecordFields, 
GqlTxTapRecordGraphQLField]": - pass + """Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database.""" + arguments: dict[str, dict[str, Any]] = { + "calibrationName": {"type": "String!", "value": calibration_name}, + "feeder": {"type": "String", "value": feeder}, + "transformerMrid": {"type": "String", "value": transformer_mrid}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GqlTxTapRecordFields( + field_name="getTransformerTapSettings", arguments=cleared_arguments + ) @classmethod def get_work_package_by_id( cls, id: str, *, with_groupings: Optional[bool] = None ) -> "GraphQLQuery[HcWorkPackageFields, HcWorkPackageGraphQLField]": - pass + """Retrieve a hosting capacity work package by ID, withGroupings: Whether to include model groupings in the work package progress details, default value is false""" + arguments: dict[str, dict[str, Any]] = { + "id": {"type": "ID!", "value": id}, + "withGroupings": {"type": "Boolean", "value": with_groupings}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcWorkPackageFields( + field_name="getWorkPackageById", arguments=cleared_arguments + ) @classmethod def get_work_package_cost_estimation( cls, input: WorkPackageInput ) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Returns an estimated cost of the submitted hosting capacity work package.""" + arguments: dict[str, dict[str, Any]] = { + "input": {"type": "WorkPackageInput!", "value": input} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="getWorkPackageCostEstimation", arguments=cleared_arguments + ) @classmethod def get_work_package_tree( cls, id: str ) -> "GraphQLQuery[WorkPackageTreeFields, WorkPackageTreeGraphQLField]": - pass + """Retrieve a work package tree with its 
ancestors and immediate children.""" + arguments: dict[str, dict[str, Any]] = {"id": {"type": "ID!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return WorkPackageTreeFields( + field_name="getWorkPackageTree", arguments=cleared_arguments + ) @classmethod def get_work_packages( @@ -263,13 +510,36 @@ def get_work_packages( sort: Optional[HcWorkPackagesSortCriteriaInput] = None, with_groupings: Optional[bool] = None, ) -> "GraphQLQuery[HcWorkPackagePageFields, HcWorkPackagePageGraphQLField]": - pass + """Retrieve a page of hosting capacity work packages, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "HcWorkPackagesFilterInput", "value": filter_}, + "sort": {"type": "HcWorkPackagesSortCriteriaInput", "value": sort}, + "withGroupings": {"type": "Boolean", "value": with_groupings}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcWorkPackagePageFields( + field_name="getWorkPackages", arguments=cleared_arguments + ) @classmethod def hosting_capacity_file_upload_url( cls, filename: str, file_type: HostingCapacityFileType ) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass + """Generate a pre-signed URL to upload hosting capacity file to the storage location. 
Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "HostingCapacityFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="hostingCapacityFileUploadUrl", arguments=cleared_arguments + ) @classmethod def list_calibration_runs( @@ -279,13 +549,33 @@ def list_calibration_runs( calibration_time: Optional[Any] = None, status: Optional[WorkflowStatus] = None, ) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": - pass + """Retrieve all calibration runs initiated through EAS""" + arguments: dict[str, dict[str, Any]] = { + "name": {"type": "String", "value": name}, + "calibrationTime": {"type": "LocalDateTime", "value": calibration_time}, + "status": {"type": "WorkflowStatus", "value": status}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return HcCalibrationFields( + field_name="listCalibrationRuns", arguments=cleared_arguments + ) @classmethod def get_processed_diff( cls, diff_id: str ) -> "GraphQLQuery[ProcessedDiffFields, ProcessedDiffGraphQLField]": - pass + """Retrieve processed diff of hosting capacity work packages with diffId""" + arguments: dict[str, dict[str, Any]] = { + "diffId": {"type": "ID!", "value": diff_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return ProcessedDiffFields( + field_name="getProcessedDiff", arguments=cleared_arguments + ) @classmethod def get_processed_diffs( @@ -296,37 +586,76 @@ def get_processed_diffs( filter_: Optional[ProcessedDiffFilterInput] = None, sort: Optional[ProcessedDiffSortCriteriaInput] = None, ) -> "GraphQLQuery[ProcessedDiffPageFields, ProcessedDiffPageGraphQLField]": - pass 
+ """Retrieve a page of processed diffs, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "ProcessedDiffFilterInput", "value": filter_}, + "sort": {"type": "ProcessedDiffSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return ProcessedDiffPageFields( + field_name="getProcessedDiffs", arguments=cleared_arguments + ) @classmethod def get_all_jobs( cls, ) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": - pass + """Gets the ID and metadata of all ingestion jobs in reverse chronological order.""" + return IngestionJobFields(field_name="getAllJobs") @classmethod def get_distinct_metric_names( cls, job_id: str ) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Gets all possible values of metricName for a specific job.""" + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="getDistinctMetricNames", arguments=cleared_arguments + ) @classmethod def get_metrics( cls, job_id: str, container_type: ContainerType, container_id: str ) -> "GraphQLQuery[MetricFields, MetricGraphQLField]": - pass + """Gets the metrics for a network container emitted in an ingestion job.""" + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id}, + "containerType": {"type": "ContainerType!", "value": container_type}, + "containerId": {"type": "String!", "value": container_id}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return MetricFields(field_name="getMetrics", arguments=cleared_arguments) @classmethod def get_newest_job( cls, ) 
-> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": - pass + """Gets the ID and metadata of the newest ingestion job. If no job exists, this returns null.""" + return IngestionJobFields(field_name="getNewestJob") @classmethod def get_sources( cls, job_id: str ) -> "GraphQLQuery[JobSourceFields, JobSourceGraphQLField]": - pass + """Gets the data sources used in an ingestion job.""" + arguments: dict[str, dict[str, Any]] = { + "jobId": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return JobSourceFields(field_name="getSources", arguments=cleared_arguments) @classmethod def paged_sincal_model_presets( @@ -337,7 +666,19 @@ def paged_sincal_model_presets( filter_: Optional[GetSincalModelPresetsFilterInput] = None, sort: Optional[GetSincalModelPresetsSortCriteriaInput] = None, ) -> "GraphQLQuery[SincalModelPresetPageFields, SincalModelPresetPageGraphQLField]": - pass + """Retrieve a page of sincalModel presets, with optional limit and offset, and optional filtering. 
A default preset with null ID will also be included in the response, which may result in the number of presets returned exceeding the desired page size (limit).""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetSincalModelPresetsFilterInput", "value": filter_}, + "sort": {"type": "GetSincalModelPresetsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetPageFields( + field_name="pagedSincalModelPresets", arguments=cleared_arguments + ) @classmethod def paged_sincal_models( @@ -348,87 +689,185 @@ def paged_sincal_models( filter_: Optional[GetSincalModelsFilterInput] = None, sort: Optional[GetSincalModelsSortCriteriaInput] = None, ) -> "GraphQLQuery[SincalModelPageFields, SincalModelPageGraphQLField]": - pass + """Retrieve a page of sincalModels, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetSincalModelsFilterInput", "value": filter_}, + "sort": {"type": "GetSincalModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPageFields( + field_name="pagedSincalModels", arguments=cleared_arguments + ) @classmethod def sincal_model_by_id( cls, model_id: str ) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": - pass + """Retrieve a sincalModel by ID""" + arguments: dict[str, dict[str, Any]] = { + "modelId": {"type": "ID!", "value": model_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelFields( + field_name="sincalModelById", arguments=cleared_arguments + ) 
@classmethod def sincal_model_config_upload_url( cls, filename: str, file_type: SincalFileType ) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass + """Generate a pre-signed URL to upload a sincal configuration file to the input storage location. Returns the pre-signed URL along with the final file path as it will be referenced by EAS. This does not update the sincal configuration. To make use of a newly uploaded configuration file, pass the `filePath` returned by this query to the `updateSincalModelConfigFilePath()` mutation.""" + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "SincalFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="sincalModelConfigUploadUrl", arguments=cleared_arguments + ) @classmethod def sincal_model_global_config( cls, ) -> "GraphQLQuery[SincalGlobalInputsConfigFields, SincalGlobalInputsConfigGraphQLField]": - pass + """Retrieve the current sincalModel input file paths.""" + return SincalGlobalInputsConfigFields(field_name="sincalModelGlobalConfig") @classmethod def sincal_model_preset_by_id( cls, preset_id: str ) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": - pass + """Retrieve a sincalModel preset by ID""" + arguments: dict[str, dict[str, Any]] = { + "presetId": {"type": "ID!", "value": preset_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetFields( + field_name="sincalModelPresetById", arguments=cleared_arguments + ) @classmethod def sincal_model_presets_by_ids( cls, preset_ids: list[str] ) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": - pass + """Retrieve a list of sincalModel presets by IDs""" + arguments: dict[str, dict[str, Any]] = { + 
"presetIds": {"type": "[ID!]!", "value": preset_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelPresetFields( + field_name="sincalModelPresetsByIds", arguments=cleared_arguments + ) @classmethod def sincal_models_by_ids( cls, model_ids: list[str] ) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": - pass + """Retrieve a list of sincalModels by IDs""" + arguments: dict[str, dict[str, Any]] = { + "modelIds": {"type": "[ID!]!", "value": model_ids} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return SincalModelFields( + field_name="sincalModelsByIds", arguments=cleared_arguments + ) @classmethod def create_machine_api_key( cls, roles: list[str], token_name: str ) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Create a new JWT auth token for a machine with the specified roles.""" + arguments: dict[str, dict[str, Any]] = { + "roles": {"type": "[String!]!", "value": roles}, + "tokenName": {"type": "String!", "value": token_name}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField( + field_name="createMachineApiKey", arguments=cleared_arguments + ) @classmethod def create_user_api_key( cls, roles: list[str], token_name: str ) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Create the JWT auth token for the current user with specified roles.""" + arguments: dict[str, dict[str, Any]] = { + "roles": {"type": "[String!]!", "value": roles}, + "tokenName": {"type": "String!", "value": token_name}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return GraphQLField(field_name="createUserApiKey", arguments=cleared_arguments) @classmethod def get_machine_tokens( cls, ) -> "GraphQLQuery[MachineUserFields, MachineUserGraphQLField]": - pass + """Gets 
all machine token users with their username and display name.""" + return MachineUserFields(field_name="getMachineTokens") @classmethod def get_public_geo_view_config(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Retrieve the GeoViewConfig used to config the EWB public map tile endpoint. Returns NUll if not enabled.""" + return GraphQLField(field_name="getPublicGeoViewConfig") @classmethod def get_all_external_roles(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass + """Get all external roles from EAS.""" + return GraphQLField(field_name="getAllExternalRoles") @classmethod def get_network_models( cls, ) -> "GraphQLQuery[NetworkModelsFields, NetworkModelsGraphQLField]": - pass + """Get all EWB network models""" + return NetworkModelsFields(field_name="getNetworkModels") @classmethod def get_feeder_load_analysis_report_status( cls, report_id: str, full_spec: bool ) -> "GraphQLQuery[FeederLoadAnalysisReportFields, FeederLoadAnalysisReportGraphQLField]": - pass + """Retrieve the status of a feeder load analysis job.""" + arguments: dict[str, dict[str, Any]] = { + "reportId": {"type": "ID!", "value": report_id}, + "fullSpec": {"type": "Boolean!", "value": full_spec}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return FeederLoadAnalysisReportFields( + field_name="getFeederLoadAnalysisReportStatus", arguments=cleared_arguments + ) @classmethod def get_ingestor_run( cls, id: int ) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": - pass + """Retrieve ingestor run details by ID""" + arguments: dict[str, dict[str, Any]] = {"id": {"type": "Int!", "value": id}} + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestionRunFields( + field_name="getIngestorRun", arguments=cleared_arguments + ) @classmethod def list_ingestor_runs( @@ -437,7 +876,17 @@ def list_ingestor_runs( filter_: 
Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None, ) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": - pass + """Retrieve all ingestor runs initiated through EAS""" + arguments: dict[str, dict[str, Any]] = { + "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, + "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestionRunFields( + field_name="listIngestorRuns", arguments=cleared_arguments + ) @classmethod def list_ingestor_runs_paged( @@ -448,7 +897,19 @@ def list_ingestor_runs_paged( filter_: Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None, ) -> "GraphQLQuery[IngestorRunPageFields, IngestorRunPageGraphQLField]": - pass + """Retrieve all ingestor runs initiated through EAS""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "IngestorRunsFilterInput", "value": filter_}, + "sort": {"type": "IngestorRunsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return IngestorRunPageFields( + field_name="listIngestorRunsPaged", arguments=cleared_arguments + ) @classmethod def paged_open_dss_models( @@ -459,46 +920,102 @@ def paged_open_dss_models( filter_: Optional[GetOpenDssModelsFilterInput] = None, sort: Optional[GetOpenDssModelsSortCriteriaInput] = None, ) -> "GraphQLQuery[OpenDssModelPageFields, OpenDssModelPageGraphQLField]": - pass + """Retrieve a page of OpenDSS models, with optional limit and offset, and optional filtering""" + arguments: dict[str, dict[str, Any]] = { + "limit": {"type": "Int", "value": limit}, + "offset": {"type": "Long", "value": offset}, + "filter": {"type": "GetOpenDssModelsFilterInput", 
"value": filter_}, + "sort": {"type": "GetOpenDssModelsSortCriteriaInput", "value": sort}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return OpenDssModelPageFields( + field_name="pagedOpenDssModels", arguments=cleared_arguments + ) @classmethod def get_user_permitted_customer_list_column_config( cls, ) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": - pass + """Fetches the user permitted column configuration for the customer list view.""" + return UserCustomerListColumnConfigFields( + field_name="getUserPermittedCustomerListColumnConfig" + ) @classmethod def get_user_saved_customer_list_column_config( cls, ) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": - pass + """Fetches the user's column configuration for the customer list view.""" + return UserCustomerListColumnConfigFields( + field_name="getUserSavedCustomerListColumnConfig" + ) @classmethod def get_customer_list( cls, m_ri_ds: list[str] ) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": - pass + """Retrieve the list of customers and their details.""" + arguments: dict[str, dict[str, Any]] = { + "mRIDs": {"type": "[String!]!", "value": m_ri_ds} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return CustomerDetailsResponseFields( + field_name="getCustomerList", arguments=cleared_arguments + ) @classmethod def get_customer_list_by_nmis( cls, nmis: list[str] ) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": - pass + """Retrieve customer details using NMIs as input.""" + arguments: dict[str, dict[str, Any]] = { + "nmis": {"type": "[String!]!", "value": nmis} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return CustomerDetailsResponseFields( + 
field_name="getCustomerListByNmis", arguments=cleared_arguments + ) @classmethod def get_app_options( cls, ) -> "GraphQLQuery[AppOptionsFields, AppOptionsGraphQLField]": - pass + """Get App Options""" + return AppOptionsFields(field_name="getAppOptions") @classmethod def get_presigned_upload_url_for_variant( cls, filename: str, file_type: VariantFileType ) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass + """Generate a pre-signed URL to upload variant files to the cloud storage. Returns the pre-signed URL along with the final file path as it will be referenced by EAS""" + arguments: dict[str, dict[str, Any]] = { + "filename": {"type": "String!", "value": filename}, + "fileType": {"type": "VariantFileType!", "value": file_type}, + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return UploadUrlResponseFields( + field_name="getPresignedUploadUrlForVariant", arguments=cleared_arguments + ) @classmethod def get_variant_upload_info( cls, job_id: str ) -> "GraphQLQuery[VariantWorkPackageFields, VariantWorkPackageGraphQLField]": - pass + """Retrieves status of a variant ingestion workflow""" + arguments: dict[str, dict[str, Any]] = { + "jobID": {"type": "String!", "value": job_id} + } + cleared_arguments = { + key: value for key, value in arguments.items() if value["value"] is not None + } + return VariantWorkPackageFields( + field_name="getVariantUploadInfo", arguments=cleared_arguments + ) diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi deleted file mode 100644 index 67d94b8..0000000 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi +++ /dev/null @@ -1,264 +0,0 @@ -from typing import Any, Optional -from .custom_fields import AppOptionsFields, CustomerDetailsResponseFields, DurationCurveByTerminalFields, FeederLoadAnalysisReportFields, GqlTxTapRecordFields, 
GqlUserFields, GqlUserResponseFields, HcCalibrationFields, HcScenarioConfigsPageFields, HcWorkPackageFields, HcWorkPackagePageFields, IngestionJobFields, IngestionRunFields, IngestorRunPageFields, JobSourceFields, MachineUserFields, MetricFields, NetworkModelsFields, OpenDssModelPageFields, OpportunitiesByYearFields, OpportunityFields, OpportunityLocationFields, PowerFactoryModelFields, PowerFactoryModelPageFields, PowerFactoryModelTemplateFields, PowerFactoryModelTemplatePageFields, ProcessedDiffFields, ProcessedDiffPageFields, SincalGlobalInputsConfigFields, SincalModelFields, SincalModelPageFields, SincalModelPresetFields, SincalModelPresetPageFields, StudyFields, StudyPageFields, StudyResultFields, UploadUrlResponseFields, UserCustomerListColumnConfigFields, VariantWorkPackageFields, WorkPackageTreeFields -from .custom_typing_fields import GraphQLField -from .input_types import GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput, GetPowerFactoryModelsFilterInput, GetPowerFactoryModelsSortCriteriaInput, GetPowerFactoryModelTemplatesFilterInput, GetPowerFactoryModelTemplatesSortCriteriaInput, GetSincalModelPresetsFilterInput, GetSincalModelPresetsSortCriteriaInput, GetSincalModelsFilterInput, GetSincalModelsSortCriteriaInput, GetStudiesFilterInput, GetStudiesSortCriteriaInput, HcScenarioConfigsFilterInput, HcWorkPackagesFilterInput, HcWorkPackagesSortCriteriaInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, ProcessedDiffFilterInput, ProcessedDiffSortCriteriaInput, WorkPackageInput -from . 
import HostingCapacityFileType, ContainerType, SincalFileType, VariantFileType, WorkflowStatus -from zepben.eas import StudyPageGraphQLField, StudyResultGraphQLField, StudyGraphQLField, StudyGraphQLField, GraphQLField, GqlUserResponseGraphQLField, PowerFactoryModelTemplatePageGraphQLField, PowerFactoryModelPageGraphQLField, PowerFactoryModelGraphQLField, PowerFactoryModelTemplateGraphQLField, PowerFactoryModelTemplateGraphQLField, PowerFactoryModelGraphQLField, GraphQLField, GqlUserGraphQLField, HcCalibrationGraphQLField, GraphQLField, DurationCurveByTerminalGraphQLField, OpportunitiesByYearGraphQLField, OpportunityGraphQLField, OpportunityGraphQLField, OpportunityLocationGraphQLField, HcScenarioConfigsPageGraphQLField, GqlTxTapRecordGraphQLField, HcWorkPackageGraphQLField, GraphQLField, WorkPackageTreeGraphQLField, HcWorkPackagePageGraphQLField, UploadUrlResponseGraphQLField, HcCalibrationGraphQLField, ProcessedDiffGraphQLField, ProcessedDiffPageGraphQLField, IngestionJobGraphQLField, GraphQLField, MetricGraphQLField, IngestionJobGraphQLField, JobSourceGraphQLField, SincalModelPresetPageGraphQLField, SincalModelPageGraphQLField, SincalModelGraphQLField, UploadUrlResponseGraphQLField, SincalGlobalInputsConfigGraphQLField, SincalModelPresetGraphQLField, SincalModelPresetGraphQLField, SincalModelGraphQLField, GraphQLField, GraphQLField, MachineUserGraphQLField, GraphQLField, GraphQLField, NetworkModelsGraphQLField, FeederLoadAnalysisReportGraphQLField, IngestionRunGraphQLField, IngestionRunGraphQLField, IngestorRunPageGraphQLField, OpenDssModelPageGraphQLField, UserCustomerListColumnConfigGraphQLField, UserCustomerListColumnConfigGraphQLField, CustomerDetailsResponseGraphQLField, CustomerDetailsResponseGraphQLField, AppOptionsGraphQLField, UploadUrlResponseGraphQLField, VariantWorkPackageGraphQLField -from typing import Generic, TypeVar -TGraphQLQueryField = TypeVar('TGraphQLQueryField') -TGraphQLField = TypeVar('TGraphQLField') - -class 
GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): - - def fields(self, *fields: TGraphQLField): - ... - -class Query: - - @classmethod - def paged_studies(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetStudiesFilterInput]=None, sort: Optional[GetStudiesSortCriteriaInput]=None) -> "GraphQLQuery[StudyPageFields, StudyPageGraphQLField]": - pass - - @classmethod - def results_by_id(cls, ids: list[str]) -> "GraphQLQuery[StudyResultFields, StudyResultGraphQLField]": - pass - - @classmethod - def studies(cls, *, filter_: Optional[GetStudiesFilterInput]=None) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": - pass - - @classmethod - def studies_by_id(cls, ids: list[str]) -> "GraphQLQuery[StudyFields, StudyGraphQLField]": - pass - - @classmethod - def styles_by_id(cls, ids: list[str]) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def current_user(cls) -> "GraphQLQuery[GqlUserResponseFields, GqlUserResponseGraphQLField]": - pass - - @classmethod - def paged_power_factory_model_templates(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetPowerFactoryModelTemplatesFilterInput]=None, sort: Optional[GetPowerFactoryModelTemplatesSortCriteriaInput]=None) -> "GraphQLQuery[PowerFactoryModelTemplatePageFields, PowerFactoryModelTemplatePageGraphQLField]": - pass - - @classmethod - def paged_power_factory_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetPowerFactoryModelsFilterInput]=None, sort: Optional[GetPowerFactoryModelsSortCriteriaInput]=None) -> "GraphQLQuery[PowerFactoryModelPageFields, PowerFactoryModelPageGraphQLField]": - pass - - @classmethod - def power_factory_model_by_id(cls, model_id: str) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": - pass - - @classmethod - def power_factory_model_template_by_id(cls, template_id: str) -> "GraphQLQuery[PowerFactoryModelTemplateFields, 
PowerFactoryModelTemplateGraphQLField]": - pass - - @classmethod - def power_factory_model_templates_by_ids(cls, template_ids: list[str]) -> "GraphQLQuery[PowerFactoryModelTemplateFields, PowerFactoryModelTemplateGraphQLField]": - pass - - @classmethod - def power_factory_models_by_ids(cls, model_ids: list[str]) -> "GraphQLQuery[PowerFactoryModelFields, PowerFactoryModelGraphQLField]": - pass - - @classmethod - def get_active_work_packages(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_all_work_packages_authors(cls) -> "GraphQLQuery[GqlUserFields, GqlUserGraphQLField]": - pass - - @classmethod - def get_calibration_run(cls, id: str) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": - pass - - @classmethod - def get_calibration_sets(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_duration_curves(cls, work_package_id: str, scenario: str, feeder: str, year: int, conducting_equipment_mrid: str) -> "GraphQLQuery[DurationCurveByTerminalFields, DurationCurveByTerminalGraphQLField]": - pass - - @classmethod - def get_opportunities(cls, *, year: Optional[int]=None) -> "GraphQLQuery[OpportunitiesByYearFields, OpportunitiesByYearGraphQLField]": - pass - - @classmethod - def get_opportunities_for_equipment(cls, m_rid: str) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": - pass - - @classmethod - def get_opportunity(cls, id: str) -> "GraphQLQuery[OpportunityFields, OpportunityGraphQLField]": - pass - - @classmethod - def get_opportunity_locations(cls, *, year: Optional[int]=None) -> "GraphQLQuery[OpportunityLocationFields, OpportunityLocationGraphQLField]": - pass - - @classmethod - def get_scenario_configurations(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[HcScenarioConfigsFilterInput]=None) -> "GraphQLQuery[HcScenarioConfigsPageFields, HcScenarioConfigsPageGraphQLField]": - pass - - @classmethod - def 
get_transformer_tap_settings(cls, calibration_name: str, *, feeder: Optional[str]=None, transformer_mrid: Optional[str]=None) -> "GraphQLQuery[GqlTxTapRecordFields, GqlTxTapRecordGraphQLField]": - pass - - @classmethod - def get_work_package_by_id(cls, id: str, *, with_groupings: Optional[bool]=None) -> "GraphQLQuery[HcWorkPackageFields, HcWorkPackageGraphQLField]": - pass - - @classmethod - def get_work_package_cost_estimation(cls, input: WorkPackageInput) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_work_package_tree(cls, id: str) -> "GraphQLQuery[WorkPackageTreeFields, WorkPackageTreeGraphQLField]": - pass - - @classmethod - def get_work_packages(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[HcWorkPackagesFilterInput]=None, sort: Optional[HcWorkPackagesSortCriteriaInput]=None, with_groupings: Optional[bool]=None) -> "GraphQLQuery[HcWorkPackagePageFields, HcWorkPackagePageGraphQLField]": - pass - - @classmethod - def hosting_capacity_file_upload_url(cls, filename: str, file_type: HostingCapacityFileType) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass - - @classmethod - def list_calibration_runs(cls, *, name: Optional[str]=None, calibration_time: Optional[Any]=None, status: Optional[WorkflowStatus]=None) -> "GraphQLQuery[HcCalibrationFields, HcCalibrationGraphQLField]": - pass - - @classmethod - def get_processed_diff(cls, diff_id: str) -> "GraphQLQuery[ProcessedDiffFields, ProcessedDiffGraphQLField]": - pass - - @classmethod - def get_processed_diffs(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[ProcessedDiffFilterInput]=None, sort: Optional[ProcessedDiffSortCriteriaInput]=None) -> "GraphQLQuery[ProcessedDiffPageFields, ProcessedDiffPageGraphQLField]": - pass - - @classmethod - def get_all_jobs(cls) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": - pass - - @classmethod - def get_distinct_metric_names(cls, 
job_id: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_metrics(cls, job_id: str, container_type: ContainerType, container_id: str) -> "GraphQLQuery[MetricFields, MetricGraphQLField]": - pass - - @classmethod - def get_newest_job(cls) -> "GraphQLQuery[IngestionJobFields, IngestionJobGraphQLField]": - pass - - @classmethod - def get_sources(cls, job_id: str) -> "GraphQLQuery[JobSourceFields, JobSourceGraphQLField]": - pass - - @classmethod - def paged_sincal_model_presets(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetSincalModelPresetsFilterInput]=None, sort: Optional[GetSincalModelPresetsSortCriteriaInput]=None) -> "GraphQLQuery[SincalModelPresetPageFields, SincalModelPresetPageGraphQLField]": - pass - - @classmethod - def paged_sincal_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetSincalModelsFilterInput]=None, sort: Optional[GetSincalModelsSortCriteriaInput]=None) -> "GraphQLQuery[SincalModelPageFields, SincalModelPageGraphQLField]": - pass - - @classmethod - def sincal_model_by_id(cls, model_id: str) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": - pass - - @classmethod - def sincal_model_config_upload_url(cls, filename: str, file_type: SincalFileType) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass - - @classmethod - def sincal_model_global_config(cls) -> "GraphQLQuery[SincalGlobalInputsConfigFields, SincalGlobalInputsConfigGraphQLField]": - pass - - @classmethod - def sincal_model_preset_by_id(cls, preset_id: str) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": - pass - - @classmethod - def sincal_model_presets_by_ids(cls, preset_ids: list[str]) -> "GraphQLQuery[SincalModelPresetFields, SincalModelPresetGraphQLField]": - pass - - @classmethod - def sincal_models_by_ids(cls, model_ids: list[str]) -> "GraphQLQuery[SincalModelFields, SincalModelGraphQLField]": - pass - 
- @classmethod - def create_machine_api_key(cls, roles: list[str], token_name: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def create_user_api_key(cls, roles: list[str], token_name: str) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_machine_tokens(cls) -> "GraphQLQuery[MachineUserFields, MachineUserGraphQLField]": - pass - - @classmethod - def get_public_geo_view_config(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_all_external_roles(cls) -> "GraphQLQuery[GraphQLField, GraphQLField]": - pass - - @classmethod - def get_network_models(cls) -> "GraphQLQuery[NetworkModelsFields, NetworkModelsGraphQLField]": - pass - - @classmethod - def get_feeder_load_analysis_report_status(cls, report_id: str, full_spec: bool) -> "GraphQLQuery[FeederLoadAnalysisReportFields, FeederLoadAnalysisReportGraphQLField]": - pass - - @classmethod - def get_ingestor_run(cls, id: int) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": - pass - - @classmethod - def list_ingestor_runs(cls, *, filter_: Optional[IngestorRunsFilterInput]=None, sort: Optional[IngestorRunsSortCriteriaInput]=None) -> "GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]": - pass - - @classmethod - def list_ingestor_runs_paged(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[IngestorRunsFilterInput]=None, sort: Optional[IngestorRunsSortCriteriaInput]=None) -> "GraphQLQuery[IngestorRunPageFields, IngestorRunPageGraphQLField]": - pass - - @classmethod - def paged_open_dss_models(cls, *, limit: Optional[int]=None, offset: Optional[Any]=None, filter_: Optional[GetOpenDssModelsFilterInput]=None, sort: Optional[GetOpenDssModelsSortCriteriaInput]=None) -> "GraphQLQuery[OpenDssModelPageFields, OpenDssModelPageGraphQLField]": - pass - - @classmethod - def get_user_permitted_customer_list_column_config(cls) -> "GraphQLQuery[UserCustomerListColumnConfigFields, 
UserCustomerListColumnConfigGraphQLField]": - pass - - @classmethod - def get_user_saved_customer_list_column_config(cls) -> "GraphQLQuery[UserCustomerListColumnConfigFields, UserCustomerListColumnConfigGraphQLField]": - pass - - @classmethod - def get_customer_list(cls, m_ri_ds: list[str]) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": - pass - - @classmethod - def get_customer_list_by_nmis(cls, nmis: list[str]) -> "GraphQLQuery[CustomerDetailsResponseFields, CustomerDetailsResponseGraphQLField]": - pass - - @classmethod - def get_app_options(cls) -> "GraphQLQuery[AppOptionsFields, AppOptionsGraphQLField]": - pass - - @classmethod - def get_presigned_upload_url_for_variant(cls, filename: str, file_type: VariantFileType) -> "GraphQLQuery[UploadUrlResponseFields, UploadUrlResponseGraphQLField]": - pass - - @classmethod - def get_variant_upload_info(cls, job_id: str) -> "GraphQLQuery[VariantWorkPackageFields, VariantWorkPackageGraphQLField]": - pass \ No newline at end of file diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak b/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak deleted file mode 100644 index 341d379..0000000 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.pyi.bak +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -from _typeshed import Incomplete -from typing import Generic, TypeVar, Optional - -from zepben.eas import IngestionRunGraphQLField, IngestionRunFields, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput - -TGraphQLQueryField = TypeVar("TGraphQLQueryField") -TGraphQLField = TypeVar("TGraphQLField") - -def __getattr__(name) -> Incomplete: ... 
- -class GraphQLQuery(Generic[TGraphQLQueryField, TGraphQLField]): - def fields(self, *fields: TGraphQLField): ... - - -class Query: - def __getattr__(self, name: str) -> Incomplete: ... - - @classmethod - def list_ingestor_runs(cls, *, filter_: Optional[IngestorRunsFilterInput] = None, sort: Optional[IngestorRunsSortCriteriaInput] = None ) -> GraphQLQuery[IngestionRunFields, IngestionRunGraphQLField]: ... diff --git a/test/test_client_generation.py b/test/test_client_generation.py index 02d67c4..d663d6b 100644 --- a/test/test_client_generation.py +++ b/test/test_client_generation.py @@ -6,6 +6,7 @@ import ast import pytest +@pytest.mark.skip("deleteme") @pytest.mark.asyncio async def test_do_things(): from zepben.eas.lib.generated_graphql_client import custom_queries From 66b2dbe4351a20eb5e412fa94f934ddccbd5a53e Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 15:08:42 +1100 Subject: [PATCH 29/32] move monkey patched code into plugin spec format - depends on PR in code gen lib Signed-off-by: Max Chesterfield --- pyproject.toml | 6 +- src/zepben/eas/__init__.py | 14 +- src/zepben/eas/client/decorators.py | 13 +- src/zepben/eas/client/eas_client.py | 82 ++--- .../lib/ariadne_plugins/base_operation.pyi | 23 -- .../custom_query_type_hinter.py | 59 +-- .../eas/lib/ariadne_plugins/gql_all_fields.py | 45 +++ .../ariadne_plugins/missed_import_checker.py | 23 ++ .../async_base_client.py | 343 ++++-------------- .../generated_graphql_client/base_model.py | 17 +- .../base_operation.py | 110 ++---- .../base_operation.pyi | 23 -- .../custom_queries.py | 93 ++--- .../generated_graphql_client/exceptions.py | 54 +-- 14 files changed, 310 insertions(+), 595 deletions(-) delete mode 100644 src/zepben/eas/lib/ariadne_plugins/base_operation.pyi create mode 100644 src/zepben/eas/lib/ariadne_plugins/gql_all_fields.py create mode 100644 src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py delete mode 100644 
src/zepben/eas/lib/generated_graphql_client/base_operation.pyi diff --git a/pyproject.toml b/pyproject.toml index dd478bc..e455fba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ test = [ "pytest-httpserver==1.0.8", "trustme==0.9.0" ] -eas-codegen = [ +codegen = [ "ariadne-codegen @ git+https://github.com/chestm007/ariadne-codegen" # This could break a pypi upload. Waiting on https://github.com/mirumee/ariadne-codegen/pull/413 to be merged. ] @@ -65,5 +65,7 @@ target_package_name='generated_graphql_client' introspection_descriptions=true introspection_input_value_deprecations=true plugins=[ - "zepben.eas.lib.ariadne_plugins.custom_query_type_hinter.CustomQueryTypeHinterPlugin" + "zepben.eas.lib.ariadne_plugins.custom_query_type_hinter.CustomQueryTypeHinterPlugin", + "zepben.eas.lib.ariadne_plugins.missed_import_checker.MissedImportCheckerPlugin", + "zepben.eas.lib.ariadne_plugins.gql_all_fields.GqlAllFieldsPlugin" ] diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index a6b3ea5..860a1aa 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -4,11 +4,17 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
# +import warnings from zepben.eas.client.eas_client import * from zepben.eas.client.enums import * -from zepben.eas.lib.generated_graphql_client import * -from zepben.eas.lib.generated_graphql_client.custom_mutations import * -from zepben.eas.lib.generated_graphql_client.custom_queries import * -from zepben.eas.lib.generated_graphql_client.custom_fields import * +try: + from zepben.eas.lib.generated_graphql_client import * + from zepben.eas.lib.generated_graphql_client.custom_mutations import * + from zepben.eas.lib.generated_graphql_client.custom_queries import * + from zepben.eas.lib.generated_graphql_client.custom_fields import * +except ImportError: + warnings.warn( + "Could not import `zepben.eas.lib.generated_graphql_client`. " + ) diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index 8809272..47c1278 100644 --- a/src/zepben/eas/client/decorators.py +++ b/src/zepben/eas/client/decorators.py @@ -4,12 +4,12 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-__all__ = ['catch_warnings', 'async_func', 'opt_in', "add_method_to"] +__all__ = ['catch_warnings', 'async_func', 'opt_in'] import asyncio import functools import warnings -from typing import Callable, ParamSpec, TypeVar, cast +from typing import Callable def catch_warnings(func: Callable) -> Callable: @@ -40,15 +40,6 @@ def wrapper(self, *args, **kwargs): return asyncio.run(func(self, *args, **kwargs)) return wrapper -def add_method_to(class_to_extend: type) -> Callable: - """ - - :rtype: Callable - """ - def decorator(func: Callable): - setattr(class_to_extend, func.__name__, func) - return decorator - def opt_in(func: Callable) -> Callable: @functools.wraps(func) def wrapper(self, *args, **kwargs): diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 5f8505b..565b820 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -9,34 +9,38 @@ __all__ = ["EasClient"] import sys - +import warnings if sys.version_info < (3, 13): from typing_extensions import deprecated else: from warnings import deprecated -import inspect import ssl from datetime import datetime from http import HTTPStatus -from types import MethodType -from typing import Any, Generator, cast, TypeVar, TYPE_CHECKING +from typing import Any, cast, TypeVar, TYPE_CHECKING import httpx from graphql import OperationType -from zepben.eas.client.decorators import async_func, catch_warnings, opt_in, add_method_to +from zepben.eas.client.decorators import async_func, catch_warnings, opt_in + +try: + from zepben.eas.lib.generated_graphql_client import GraphQLClientHttpError, GraphQLClientInvalidResponseError, \ + GraphQLClientGraphQLMultiError, Client, WorkPackageInput, FeederLoadAnalysisInput, StudyInput, IngestorConfigInput, \ + IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, HcModelConfigInput, OpenDssModelInput, \ + GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput + from 
zepben.eas.lib.generated_graphql_client.base_operation import GraphQLField + from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisReportFields, IngestionRunFields, \ + HcCalibrationFields, GqlTxTapRecordFields, OpenDssModelPageFields, OpenDssModelFields + from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation + from zepben.eas.lib.generated_graphql_client.custom_queries import Query + -from zepben.eas.lib.generated_graphql_client import GraphQLClientHttpError, GraphQLClientInvalidResponseError, \ - GraphQLClientGraphQLMultiError, Client, WorkPackageInput, FeederLoadAnalysisInput, StudyInput, IngestorConfigInput, \ - IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, HcGeneratorConfigInput, HcModelConfigInput, OpenDssModelInput, \ - GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput -from zepben.eas.lib.generated_graphql_client.base_operation import GraphQLField -from zepben.eas.lib.generated_graphql_client.custom_fields import FeederLoadAnalysisReportFields, IngestionRunFields, \ - HcCalibrationFields, GqlTxTapRecordFields, OpenDssModelPageFields, OpenDssModelFields -from zepben.eas.lib.generated_graphql_client.custom_mutations import Mutation -from zepben.eas.lib.generated_graphql_client.custom_queries import Query +except ImportError: + warnings.warn("could not import generated graphql client.") + Client = object if TYPE_CHECKING: from zepben.eas import GraphQLQuery @@ -45,31 +49,6 @@ R = TypeVar("R") -# noinspection PyDecorator,PyNestedDecorators -@add_method_to(GraphQLField) -@classmethod -def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: - """ - returns a generator over all ``GraphQLField``s that a given class returns - - :param cls: class to check - :return: generator over all GraphQLField's in a given class - """ - for k in dir(cls): - # we only want "public" attrs. - if k.startswith("_"): - continue - # obviously we don't want to return ourselves. 
- if k == "all_fields": - continue - - v = getattr(cls, k) - if isinstance(v, GraphQLField): - yield v - elif inspect.ismethod(v): - yield v().fields(*v().all_fields()) - - class EasClient(Client): """ A class used to represent a client to the Evolve App Server, with methods that represent requests to its API. @@ -526,19 +505,18 @@ def get_paged_opendss_models( offset=offset, filter_=query_filter, sort=query_sort - ).fields( - OpenDssModelPageFields.total_count, - OpenDssModelPageFields.offset, - OpenDssModelPageFields.models().fields( - OpenDssModelFields.id, - OpenDssModelFields.name, - OpenDssModelFields.created_at, - OpenDssModelFields.state, - OpenDssModelFields.download_url, - OpenDssModelFields.is_public, - OpenDssModelFields.errors, - OpenDssModelFields.generation_spec - ), + ), + OpenDssModelPageFields.total_count, + OpenDssModelPageFields.offset, + OpenDssModelPageFields.models().fields( + OpenDssModelFields.id, + OpenDssModelFields.name, + OpenDssModelFields.created_at, + OpenDssModelFields.state, + OpenDssModelFields.download_url, + OpenDssModelFields.is_public, + OpenDssModelFields.errors, + OpenDssModelFields.generation_spec ), ) diff --git a/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi b/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi deleted file mode 100644 index 7a3a603..0000000 --- a/src/zepben/eas/lib/ariadne_plugins/base_operation.pyi +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - -from _typeshed import Incomplete -from types import MethodType -from typing import Generator - -def __getattr__(name) -> Incomplete: ... - -class GraphQLField: - def __getattr__(self, name: str) -> Incomplete: ... 
- - @classmethod - def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: - """ - Returns a generator over all ``GraphQLField``s that a given class returns - - :param cls: class to check - :return: generator over all GraphQLField's in a given class - """ diff --git a/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py index 014eb68..f328b46 100644 --- a/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py +++ b/src/zepben/eas/lib/ariadne_plugins/custom_query_type_hinter.py @@ -5,48 +5,29 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/. import ast -from _ast import ImportFrom -from typing import Callable, Any +from _ast import ClassDef from ariadne_codegen.plugins.base import Plugin class CustomQueryTypeHinterPlugin(Plugin): - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - self._current_class = None - def generate_custom_method(self, method: ast.FunctionDef) -> ast.FunctionDef: - return method - - def generate_custom_module(self, imports: list[ImportFrom], type_imports: list[ImportFrom], class_defs: list[ast.ClassDef]) -> tuple[list[ImportFrom], list[ImportFrom], list[ast.ClassDef]]: - # 1. 
Target a specific class (e.g., the root 'Query' result) - if class_defs[0].name == "Query": - for method in class_defs[0].body: - injected_type = method.returns.id.replace("Fields", "GraphQLField") - method.returns = ast.Name( - f'\"GraphQLQuery[{method.returns.id}, {injected_type}]\"' - ) - imports.extend( - [ - ImportFrom('.custom_typing_fields', [ast.alias(injected_type)], level=0), - ImportFrom('zepben.eas.lib.ariadne_plugins.types', [ - ast.alias('GraphQLQuery') - ], level=0) - ] - ) - - return imports, type_imports, class_defs - - def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: - if (iname := import_.names[0].name) in ( - 'SincalFileType', - 'VariantFileType', - 'ContainerType', - 'HostingCapacityFileType', - 'WorkflowStatus', - ): - if import_.module is None: - print(f"[ZBEX] Assuming class import {iname} is from module 'enums.py'") - import_.module = 'enums' - return import_ + def generate_custom_module(self, module: ast.Module) -> ast.Module: + for b in module.body: + if isinstance((class_def := b), ClassDef): + if class_def.name == "Query": + for method in class_def.body: + injected_type = method.returns.id.replace("Fields", "GraphQLField") + method.returns = ast.Name( + f'\"GraphQLQuery[{method.returns.id}, {injected_type}]\"' + ) + module.body.extend( + [ + ast.ImportFrom('.custom_typing_fields', [ast.alias(injected_type)], level=0), + ast.ImportFrom('zepben.eas.lib.ariadne_plugins.types', [ + ast.alias('GraphQLQuery') + ], level=0) + ] + ) + + return module diff --git a/src/zepben/eas/lib/ariadne_plugins/gql_all_fields.py b/src/zepben/eas/lib/ariadne_plugins/gql_all_fields.py new file mode 100644 index 0000000..0f2de43 --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/gql_all_fields.py @@ -0,0 +1,45 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +import ast +from ariadne_codegen import Plugin + + +gql_field_all_fields_ast = ast.parse(""" +@classmethod +def all_fields(cls) -> "Generator[GraphQLField | MethodType, None, None]": + \"\"\" + returns a generator over all ``GraphQLField``s that a given class returns + + :param cls: class to check + :return: generator over all GraphQLField's in a given class + \"\"\" + import inspect + + for k in dir(cls): + # we only want "public" attrs. + if k.startswith("_"): + continue + # obviously we don't want to return ourselves. + if k == "all_fields": + continue + + v = getattr(cls, k) + if isinstance(v, GraphQLField): + yield v + elif inspect.ismethod(v): + yield v().fields(*v().all_fields()) +""").body[0] + +class GqlAllFieldsPlugin(Plugin): + + def copy_code(self, copied_code: str) -> str: + code_as_ast = ast.parse(copied_code) + for b in code_as_ast.body: + if isinstance(class_def := b, ast.ClassDef): + if class_def.name == "GraphQLField": + class_def.body.append(gql_field_all_fields_ast) + return ast.unparse(code_as_ast) + diff --git a/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py b/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py new file mode 100644 index 0000000..450e330 --- /dev/null +++ b/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py @@ -0,0 +1,23 @@ +# Copyright 2026 Zeppelin Bend Pty Ltd +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+import ast +from ariadne_codegen import Plugin + + +class MissedImportCheckerPlugin(Plugin): + + def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: + if (iname := import_.names[0].name) in ( + 'SincalFileType', + 'VariantFileType', + 'ContainerType', + 'HostingCapacityFileType', + 'WorkflowStatus', + ): + if import_.module is None: + print(f"[ZBEX] Assuming class import {iname} is from module 'enums.py'") + import_.module = 'enums' + return import_ diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py index c642b77..3f23742 100644 --- a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -4,81 +4,47 @@ from collections.abc import AsyncIterator from typing import IO, Any, Optional, TypeVar, cast from uuid import uuid4 - import httpx from pydantic import BaseModel from pydantic_core import to_jsonable_python - from .base_model import UNSET, Upload -from .exceptions import ( - GraphQLClientError, - GraphQLClientGraphQLMultiError, - GraphQLClientHttpError, - GraphQLClientInvalidMessageFormat, - GraphQLClientInvalidResponseError, -) - +from .exceptions import GraphQLClientError, GraphQLClientGraphQLMultiError, GraphQLClientHttpError, GraphQLClientInvalidMessageFormat, GraphQLClientInvalidResponseError try: - from websockets import ( # type: ignore[import-not-found,unused-ignore] - ClientConnection, - ) - from websockets import ( # type: ignore[import-not-found,unused-ignore] - connect as ws_connect, - ) - from websockets.typing import ( # type: ignore[import-not-found,unused-ignore] - Data, - Origin, - Subprotocol, - ) + from websockets import ClientConnection + from websockets import connect as ws_connect + from websockets.typing import Data, Origin, Subprotocol except ImportError: from contextlib import asynccontextmanager - @asynccontextmanager # type: ignore + 
@asynccontextmanager async def ws_connect(*args, **kwargs): raise NotImplementedError("Subscriptions require 'websockets' package.") yield + ClientConnection = Any + Data = Any + Origin = Any - ClientConnection = Any # type: ignore[misc,assignment,unused-ignore] - Data = Any # type: ignore[misc,assignment,unused-ignore] - Origin = Any # type: ignore[misc,assignment,unused-ignore] - - def Subprotocol(*args, **kwargs): # type: ignore # noqa: N802, N803 + def Subprotocol(*args, **kwargs): raise NotImplementedError("Subscriptions require 'websockets' package.") - - -Self = TypeVar("Self", bound="AsyncBaseClient") - -GRAPHQL_TRANSPORT_WS = "graphql-transport-ws" - +Self = TypeVar('Self', bound='AsyncBaseClient') +GRAPHQL_TRANSPORT_WS = 'graphql-transport-ws' class GraphQLTransportWSMessageType(str, enum.Enum): - CONNECTION_INIT = "connection_init" - CONNECTION_ACK = "connection_ack" - PING = "ping" - PONG = "pong" - SUBSCRIBE = "subscribe" - NEXT = "next" - ERROR = "error" - COMPLETE = "complete" - + CONNECTION_INIT = 'connection_init' + CONNECTION_ACK = 'connection_ack' + PING = 'ping' + PONG = 'pong' + SUBSCRIBE = 'subscribe' + NEXT = 'next' + ERROR = 'error' + COMPLETE = 'complete' class AsyncBaseClient: - def __init__( - self, - url: str = "", - headers: Optional[dict[str, str]] = None, - http_client: Optional[httpx.AsyncClient] = None, - ws_url: str = "", - ws_headers: Optional[dict[str, Any]] = None, - ws_origin: Optional[str] = None, - ws_connection_init_payload: Optional[dict[str, Any]] = None, - ) -> None: + + def __init__(self, url: str='', headers: Optional[dict[str, str]]=None, http_client: Optional[httpx.AsyncClient]=None, ws_url: str='', ws_headers: Optional[dict[str, Any]]=None, ws_origin: Optional[str]=None, ws_connection_init_payload: Optional[dict[str, Any]]=None) -> None: self.url = url self.headers = headers - self.http_client = ( - http_client if http_client else httpx.AsyncClient(headers=headers) - ) - + self.http_client = http_client if 
 http_client else httpx.AsyncClient(headers=headers) self.ws_url = ws_url self.ws_headers = ws_headers or {} self.ws_origin = Origin(ws_origin) if ws_origin else None @@ -87,130 +53,57 @@ def __init__( async def __aenter__(self: Self) -> Self: return self - async def __aexit__( - self, - exc_type: object, - exc_val: object, - exc_tb: object, - ) -> None: + async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: await self.http_client.aclose() - async def execute( - self, - query: str, - operation_name: Optional[str] = None, - variables: Optional[dict[str, Any]] = None, - **kwargs: Any, - ) -> httpx.Response: + async def execute(self, query: str, operation_name: Optional[str]=None, variables: Optional[dict[str, Any]]=None, **kwargs: Any) -> httpx.Response: processed_variables, files, files_map = self._process_variables(variables) - if files and files_map: - return await self._execute_multipart( - query=query, - operation_name=operation_name, - variables=processed_variables, - files=files, - files_map=files_map, - **kwargs, - ) - - return await self._execute_json( - query=query, - operation_name=operation_name, - variables=processed_variables, - **kwargs, - ) + if files and files_map: return await self._execute_multipart(query=query, operation_name=operation_name, variables=processed_variables, files=files, files_map=files_map, **kwargs) + return await self._execute_json(query=query, operation_name=operation_name, variables=processed_variables, **kwargs) def get_data(self, response: httpx.Response) -> dict[str, Any]: if not response.is_success: - raise GraphQLClientHttpError( - status_code=response.status_code, response=response - ) - + raise GraphQLClientHttpError(status_code=response.status_code, response=response) try: response_json = response.json() except ValueError as exc: raise GraphQLClientInvalidResponseError(response=response) from exc - - if (not isinstance(response_json, dict)) or ( - "data" not in response_json and "errors" not in response_json - ): 
+ if not isinstance(response_json, dict) or ('data' not in response_json and 'errors' not in response_json): raise GraphQLClientInvalidResponseError(response=response) - - data = response_json.get("data") - errors = response_json.get("errors") - + data = response_json.get('data') + errors = response_json.get('errors') if errors: - raise GraphQLClientGraphQLMultiError.from_errors_dicts( - errors_dicts=errors, data=data - ) - + raise GraphQLClientGraphQLMultiError.from_errors_dicts(errors_dicts=errors, data=data) return cast(dict[str, Any], data) - async def execute_ws( - self, - query: str, - operation_name: Optional[str] = None, - variables: Optional[dict[str, Any]] = None, - **kwargs: Any, - ) -> AsyncIterator[dict[str, Any]]: + async def execute_ws(self, query: str, operation_name: Optional[str]=None, variables: Optional[dict[str, Any]]=None, **kwargs: Any) -> AsyncIterator[dict[str, Any]]: headers = self.ws_headers.copy() - headers.update(kwargs.pop("additional_headers", {})) - - merged_kwargs: dict[str, Any] = {"origin": self.ws_origin} + headers.update(kwargs.pop('additional_headers', {})) + merged_kwargs: dict[str, Any] = {'origin': self.ws_origin} merged_kwargs.update(kwargs) - merged_kwargs["additional_headers"] = headers - + merged_kwargs['additional_headers'] = headers operation_id = str(uuid4()) - async with ws_connect( - self.ws_url, - subprotocols=[Subprotocol(GRAPHQL_TRANSPORT_WS)], - **merged_kwargs, - ) as websocket: + async with ws_connect(self.ws_url, subprotocols=[Subprotocol(GRAPHQL_TRANSPORT_WS)], **merged_kwargs) as websocket: await self._send_connection_init(websocket) - # Wait for connection_ack; some servers (e.g. Hasura) send ping before - # connection_ack, so we loop and handle pings until we get ack. 
try: - await asyncio.wait_for( - self._wait_for_connection_ack(websocket), - timeout=5.0, - ) + await asyncio.wait_for(self._wait_for_connection_ack(websocket), timeout=5.0) except asyncio.TimeoutError as exc: - raise GraphQLClientError( - "Connection ack not received within 5 seconds" - ) from exc - await self._send_subscribe( - websocket, - operation_id=operation_id, - query=query, - operation_name=operation_name, - variables=variables, - ) - + raise GraphQLClientError('Connection ack not received within 5 seconds') from exc + await self._send_subscribe(websocket, operation_id=operation_id, query=query, operation_name=operation_name, variables=variables) async for message in websocket: data = await self._handle_ws_message(message, websocket) - if data and "connection_ack" not in data: + if data and 'connection_ack' not in data: yield data - def _process_variables( - self, variables: Optional[dict[str, Any]] - ) -> tuple[ - dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]] - ]: + def _process_variables(self, variables: Optional[dict[str, Any]]) -> tuple[dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]]]: if not variables: - return {}, {}, {} - + return ({}, {}, {}) serializable_variables = self._convert_dict_to_json_serializable(variables) return self._get_files_from_variables(serializable_variables) - def _convert_dict_to_json_serializable( - self, dict_: dict[str, Any] - ) -> dict[str, Any]: - return { - key: self._convert_value(value) - for key, value in dict_.items() - if value is not UNSET - } + def _convert_dict_to_json_serializable(self, dict_: dict[str, Any]) -> dict[str, Any]: + return {key: self._convert_value(value) for key, value in dict_.items() if value is not UNSET} def _convert_value(self, value: Any) -> Any: if isinstance(value, BaseModel): @@ -219,11 +112,7 @@ def _convert_value(self, value: Any) -> Any: return [self._convert_value(item) for item in value] return value - def 
_get_files_from_variables( - self, variables: dict[str, Any] - ) -> tuple[ - dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]] - ]: + def _get_files_from_variables(self, variables: dict[str, Any]) -> tuple[dict[str, Any], dict[str, tuple[str, IO[bytes], str]], dict[str, list[str]]]: files_map: dict[str, list[str]] = {} files_list: list[Upload] = [] @@ -231,17 +120,15 @@ def separate_files(path: str, obj: Any) -> Any: if isinstance(obj, list): nulled_list = [] for index, value in enumerate(obj): - value = separate_files(f"{path}.{index}", value) + value = separate_files(f'{path}.{index}', value) nulled_list.append(value) return nulled_list - if isinstance(obj, dict): nulled_dict = {} for key, value in obj.items(): - value = separate_files(f"{path}.{key}", value) + value = separate_files(f'{path}.{key}', value) nulled_dict[key] = value return nulled_dict - if isinstance(obj, Upload): if obj in files_list: file_index = files_list.index(obj) @@ -251,143 +138,63 @@ def separate_files(path: str, obj: Any) -> Any: files_list.append(obj) files_map[str(file_index)] = [path] return None - return obj + nulled_variables = separate_files('variables', variables) + files: dict[str, tuple[str, IO[bytes], str]] = {str(i): (file_.filename, cast(IO[bytes], file_.content), file_.content_type) for i, file_ in enumerate(files_list)} + return (nulled_variables, files, files_map) - nulled_variables = separate_files("variables", variables) - files: dict[str, tuple[str, IO[bytes], str]] = { - str(i): (file_.filename, cast(IO[bytes], file_.content), file_.content_type) - for i, file_ in enumerate(files_list) - } - return nulled_variables, files, files_map - - async def _execute_multipart( - self, - query: str, - operation_name: Optional[str], - variables: dict[str, Any], - files: dict[str, tuple[str, IO[bytes], str]], - files_map: dict[str, list[str]], - **kwargs: Any, - ) -> httpx.Response: - data = { - "operations": json.dumps( - { - "query": query, - 
"operationName": operation_name, - "variables": variables, - }, - default=to_jsonable_python, - ), - "map": json.dumps(files_map, default=to_jsonable_python), - } - - return await self.http_client.post( - url=self.url, data=data, files=files, **kwargs - ) - - async def _execute_json( - self, - query: str, - operation_name: Optional[str], - variables: dict[str, Any], - **kwargs: Any, - ) -> httpx.Response: - headers: dict[str, str] = {"Content-type": "application/json"} - headers.update(kwargs.get("headers", {})) + async def _execute_multipart(self, query: str, operation_name: Optional[str], variables: dict[str, Any], files: dict[str, tuple[str, IO[bytes], str]], files_map: dict[str, list[str]], **kwargs: Any) -> httpx.Response: + data = {'operations': json.dumps({'query': query, 'operationName': operation_name, 'variables': variables}, default=to_jsonable_python), 'map': json.dumps(files_map, default=to_jsonable_python)} + return await self.http_client.post(url=self.url, data=data, files=files, **kwargs) + async def _execute_json(self, query: str, operation_name: Optional[str], variables: dict[str, Any], **kwargs: Any) -> httpx.Response: + headers: dict[str, str] = {'Content-type': 'application/json'} + headers.update(kwargs.get('headers', {})) merged_kwargs: dict[str, Any] = kwargs.copy() - merged_kwargs["headers"] = headers - - return await self.http_client.post( - url=self.url, - content=json.dumps( - { - "query": query, - "operationName": operation_name, - "variables": variables, - }, - default=to_jsonable_python, - ), - **merged_kwargs, - ) + merged_kwargs['headers'] = headers + return await self.http_client.post(url=self.url, content=json.dumps({'query': query, 'operationName': operation_name, 'variables': variables}, default=to_jsonable_python), **merged_kwargs) async def _send_connection_init(self, websocket: ClientConnection) -> None: - payload: dict[str, Any] = { - "type": GraphQLTransportWSMessageType.CONNECTION_INIT.value - } + payload: dict[str, Any] = 
{'type': GraphQLTransportWSMessageType.CONNECTION_INIT.value} if self.ws_connection_init_payload: - payload["payload"] = self.ws_connection_init_payload + payload['payload'] = self.ws_connection_init_payload await websocket.send(json.dumps(payload)) async def _wait_for_connection_ack(self, websocket: ClientConnection) -> None: """Read messages until connection_ack; handle ping/pong in between.""" async for message in websocket: data = await self._handle_ws_message(message, websocket) - if data is not None and "connection_ack" in data: + if data is not None and 'connection_ack' in data: return - async def _send_subscribe( - self, - websocket: ClientConnection, - operation_id: str, - query: str, - operation_name: Optional[str] = None, - variables: Optional[dict[str, Any]] = None, - ) -> None: - payload_inner: dict[str, Any] = { - "query": query, - "operationName": operation_name, - } + async def _send_subscribe(self, websocket: ClientConnection, operation_id: str, query: str, operation_name: Optional[str]=None, variables: Optional[dict[str, Any]]=None) -> None: + payload_inner: dict[str, Any] = {'query': query, 'operationName': operation_name} if variables: - payload_inner["variables"] = self._convert_dict_to_json_serializable( - variables - ) - payload: dict[str, Any] = { - "id": operation_id, - "type": GraphQLTransportWSMessageType.SUBSCRIBE.value, - "payload": payload_inner, - } + payload_inner['variables'] = self._convert_dict_to_json_serializable(variables) + payload: dict[str, Any] = {'id': operation_id, 'type': GraphQLTransportWSMessageType.SUBSCRIBE.value, 'payload': payload_inner} await websocket.send(json.dumps(payload)) - async def _handle_ws_message( - self, - message: Data, - websocket: ClientConnection, - expected_type: Optional[GraphQLTransportWSMessageType] = None, - ) -> Optional[dict[str, Any]]: + async def _handle_ws_message(self, message: Data, websocket: ClientConnection, expected_type: Optional[GraphQLTransportWSMessageType]=None) -> 
Optional[dict[str, Any]]: try: message_dict = json.loads(message) except json.JSONDecodeError as exc: raise GraphQLClientInvalidMessageFormat(message=message) from exc - - type_ = message_dict.get("type") - payload = message_dict.get("payload", {}) - + type_ = message_dict.get('type') + payload = message_dict.get('payload', {}) if not type_ or type_ not in {t.value for t in GraphQLTransportWSMessageType}: raise GraphQLClientInvalidMessageFormat(message=message) - if expected_type and expected_type != type_: - raise GraphQLClientInvalidMessageFormat( - f"Invalid message received. Expected: {expected_type.value}" - ) - + raise GraphQLClientInvalidMessageFormat(f'Invalid message received. Expected: {expected_type.value}') if type_ == GraphQLTransportWSMessageType.NEXT: - if "data" not in payload: + if 'data' not in payload: raise GraphQLClientInvalidMessageFormat(message=message) - return cast(dict[str, Any], payload["data"]) - + return cast(dict[str, Any], payload['data']) if type_ == GraphQLTransportWSMessageType.COMPLETE: await websocket.close() elif type_ == GraphQLTransportWSMessageType.PING: - await websocket.send( - json.dumps({"type": GraphQLTransportWSMessageType.PONG.value}) - ) + await websocket.send(json.dumps({'type': GraphQLTransportWSMessageType.PONG.value})) elif type_ == GraphQLTransportWSMessageType.ERROR: - raise GraphQLClientGraphQLMultiError.from_errors_dicts( - errors_dicts=payload, data=message_dict - ) + raise GraphQLClientGraphQLMultiError.from_errors_dicts(errors_dicts=payload, data=message_dict) elif type_ == GraphQLTransportWSMessageType.CONNECTION_ACK: - return {"connection_ack": True} - - return None + return {'connection_ack': True} + return None \ No newline at end of file diff --git a/src/zepben/eas/lib/generated_graphql_client/base_model.py b/src/zepben/eas/lib/generated_graphql_client/base_model.py index 68e2f9e..25d3444 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_model.py +++ 
b/src/zepben/eas/lib/generated_graphql_client/base_model.py @@ -1,28 +1,19 @@ from io import IOBase - from pydantic import BaseModel as PydanticBaseModel from pydantic import ConfigDict - class UnsetType: + def __bool__(self) -> bool: return False - - UNSET = UnsetType() - class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - arbitrary_types_allowed=True, - protected_namespaces=(), - ) - + model_config = ConfigDict(populate_by_name=True, validate_assignment=True, arbitrary_types_allowed=True, protected_namespaces=()) class Upload: + def __init__(self, filename: str, content: IOBase, content_type: str): self.filename = filename self.content = content - self.content_type = content_type + self.content_type = content_type \ No newline at end of file diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py index 65708d7..ba2db36 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -1,15 +1,5 @@ from typing import Any, Optional, Union - -from graphql import ( - ArgumentNode, - FieldNode, - InlineFragmentNode, - NamedTypeNode, - NameNode, - SelectionSetNode, - VariableNode, -) - +from graphql import ArgumentNode, FieldNode, InlineFragmentNode, NamedTypeNode, NameNode, SelectionSetNode, VariableNode class GraphQLArgument: """ @@ -22,11 +12,7 @@ def __init__(self, argument_name: str, argument_value: Any) -> None: def to_ast(self) -> ArgumentNode: """Converts the argument to an ArgumentNode AST object.""" - return ArgumentNode( - name=NameNode(value=self._name), - value=VariableNode(name=NameNode(value=self._value)), - ) - + return ArgumentNode(name=NameNode(value=self._name), value=VariableNode(name=NameNode(value=self._value))) class GraphQLField: """ @@ -38,9 +24,7 @@ class GraphQLField: of the GraphQL field. 
""" - def __init__( - self, field_name: str, arguments: Optional[dict[str, dict[str, Any]]] = None - ) -> None: + def __init__(self, field_name: str, arguments: Optional[dict[str, dict[str, Any]]]=None) -> None: self._field_name = field_name self._variables = arguments or {} self.formatted_variables: dict[str, dict[str, Any]] = {} @@ -48,57 +32,33 @@ def __init__( self._alias: Optional[str] = None self._inline_fragments: dict[str, tuple[GraphQLField, ...]] = {} - def alias(self, alias: str) -> "GraphQLField": + def alias(self, alias: str) -> 'GraphQLField': """Sets an alias for the GraphQL field and returns the instance.""" self._alias = alias return self def _build_field_name(self) -> str: """Builds the field name, including the alias if present.""" - return f"{self._alias}: {self._field_name}" if self._alias else self._field_name + return f'{self._alias}: {self._field_name}' if self._alias else self._field_name - def _build_selections( - self, idx: int, used_names: set[str] - ) -> list[Union[FieldNode, InlineFragmentNode]]: + def _build_selections(self, idx: int, used_names: set[str]) -> list[Union[FieldNode, InlineFragmentNode]]: """Builds the selection set for the current GraphQL field, including subfields and inline fragments.""" - # Create selections from subfields - selections: list[Union[FieldNode, InlineFragmentNode]] = [ - subfield.to_ast(idx, used_names) for subfield in self._subfields - ] - - # Add inline fragments + selections: list[Union[FieldNode, InlineFragmentNode]] = [subfield.to_ast(idx, used_names) for subfield in self._subfields] for name, subfields in self._inline_fragments.items(): - selections.append( - InlineFragmentNode( - type_condition=NamedTypeNode(name=NameNode(value=name)), - selection_set=SelectionSetNode( - selections=[ - subfield.to_ast(idx, used_names) for subfield in subfields - ] - ), - ) - ) - + selections.append(InlineFragmentNode(type_condition=NamedTypeNode(name=NameNode(value=name)), 
selection_set=SelectionSetNode(selections=[subfield.to_ast(idx, used_names) for subfield in subfields]))) return selections - def _format_variable_name( - self, idx: int, var_name: str, used_names: set[str] - ) -> str: + def _format_variable_name(self, idx: int, var_name: str, used_names: set[str]) -> str: """Generates a unique variable name by appending an index and, if necessary, an additional counter to avoid duplicates.""" - base_name = f"{var_name}_{idx}" + base_name = f'{var_name}_{idx}' unique_name = base_name counter = 1 - - # Ensure the generated name is unique while unique_name in used_names: - unique_name = f"{base_name}_{counter}" + unique_name = f'{base_name}_{counter}' counter += 1 - - # Add the unique name to the set of used names used_names.add(unique_name) - return unique_name def _collect_all_variables(self, idx: int, used_names: set[str]) -> None: @@ -107,34 +67,16 @@ def _collect_all_variables(self, idx: int, used_names: set[str]) -> None: ensuring unique names. """ self.formatted_variables = {} - for k, v in self._variables.items(): unique_name = self._format_variable_name(idx, k, used_names) - self.formatted_variables[unique_name] = { - "name": k, - "type": v["type"], - "value": v["value"], - } + self.formatted_variables[unique_name] = {'name': k, 'type': v['type'], 'value': v['value']} - def to_ast(self, idx: int, used_names: Optional[set[str]] = None) -> FieldNode: + def to_ast(self, idx: int, used_names: Optional[set[str]]=None) -> FieldNode: """Converts the current GraphQL field to an AST (Abstract Syntax Tree) node.""" if used_names is None: used_names = set() - self._collect_all_variables(idx, used_names) - - return FieldNode( - name=NameNode(value=self._build_field_name()), - arguments=[ - GraphQLArgument(v["name"], k).to_ast() - for k, v in self.formatted_variables.items() - ], - selection_set=( - SelectionSetNode(selections=self._build_selections(idx, used_names)) - if self._subfields or self._inline_fragments - else None - ), - ) + 
return FieldNode(name=NameNode(value=self._build_field_name()), arguments=[GraphQLArgument(v['name'], k).to_ast() for k, v in self.formatted_variables.items()], selection_set=SelectionSetNode(selections=self._build_selections(idx, used_names)) if self._subfields or self._inline_fragments else None) def get_formatted_variables(self) -> dict[str, dict[str, Any]]: """ @@ -142,13 +84,29 @@ def get_formatted_variables(self) -> dict[str, dict[str, Any]]: including those from subfields and inline fragments. """ formatted_variables = self.formatted_variables.copy() - - # Collect variables from subfields for subfield in self._subfields: formatted_variables.update(subfield.get_formatted_variables()) - - # Collect variables from inline fragments for subfields in self._inline_fragments.values(): for subfield in subfields: formatted_variables.update(subfield.get_formatted_variables()) return formatted_variables + + @classmethod + def all_fields(cls) -> 'Generator[GraphQLField | MethodType, None, None]': + """ + returns a generator over all ``GraphQLField``s that a given class returns + + :param cls: class to check + :return: generator over all GraphQLField's in a given class + """ + import inspect + for k in dir(cls): + if k.startswith('_'): + continue + if k == 'all_fields': + continue + v = getattr(cls, k) + if isinstance(v, GraphQLField): + yield v + elif inspect.ismethod(v): + yield v().fields(*v().all_fields()) \ No newline at end of file diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi b/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi deleted file mode 100644 index 7a3a603..0000000 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.pyi +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -from _typeshed import Incomplete -from types import MethodType -from typing import Generator - -def __getattr__(name) -> Incomplete: ... - -class GraphQLField: - def __getattr__(self, name: str) -> Incomplete: ... - - @classmethod - def all_fields(cls) -> Generator[GraphQLField | MethodType, None, None]: - """ - Returns a generator over all ``GraphQLField``s that a given class returns - - :param cls: class to check - :return: generator over all GraphQLField's in a given class - """ diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py index 4d69671..2e110fb 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_queries.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_queries.py @@ -1,7 +1,5 @@ from typing import Any, Optional -from zepben.eas.lib.ariadne_plugins.types import GraphQLQuery - from .custom_fields import ( AppOptionsFields, CustomerDetailsResponseFields, @@ -44,49 +42,7 @@ VariantWorkPackageFields, WorkPackageTreeFields, ) -from .custom_typing_fields import ( - AppOptionsGraphQLField, - CustomerDetailsResponseGraphQLField, - DurationCurveByTerminalGraphQLField, - FeederLoadAnalysisReportGraphQLField, - GqlTxTapRecordGraphQLField, - GqlUserGraphQLField, - GqlUserResponseGraphQLField, - GraphQLField, - HcCalibrationGraphQLField, - HcScenarioConfigsPageGraphQLField, - HcWorkPackageGraphQLField, - HcWorkPackagePageGraphQLField, - IngestionJobGraphQLField, - IngestionRunGraphQLField, - IngestorRunPageGraphQLField, - JobSourceGraphQLField, - MachineUserGraphQLField, - MetricGraphQLField, - NetworkModelsGraphQLField, - OpenDssModelPageGraphQLField, - OpportunitiesByYearGraphQLField, - OpportunityGraphQLField, - OpportunityLocationGraphQLField, - PowerFactoryModelGraphQLField, - PowerFactoryModelPageGraphQLField, - PowerFactoryModelTemplateGraphQLField, - PowerFactoryModelTemplatePageGraphQLField, - ProcessedDiffGraphQLField, - 
ProcessedDiffPageGraphQLField, - SincalGlobalInputsConfigGraphQLField, - SincalModelGraphQLField, - SincalModelPageGraphQLField, - SincalModelPresetGraphQLField, - SincalModelPresetPageGraphQLField, - StudyGraphQLField, - StudyPageGraphQLField, - StudyResultGraphQLField, - UploadUrlResponseGraphQLField, - UserCustomerListColumnConfigGraphQLField, - VariantWorkPackageGraphQLField, - WorkPackageTreeGraphQLField, -) +from .custom_typing_fields import GraphQLField from .enums import ( ContainerType, HostingCapacityFileType, @@ -1019,3 +975,50 @@ def get_variant_upload_info( return VariantWorkPackageFields( field_name="getVariantUploadInfo", arguments=cleared_arguments ) + + +from zepben.eas.lib.ariadne_plugins.types import GraphQLQuery + +from .custom_typing_fields import ( + AppOptionsGraphQLField, + CustomerDetailsResponseGraphQLField, + DurationCurveByTerminalGraphQLField, + FeederLoadAnalysisReportGraphQLField, + GqlTxTapRecordGraphQLField, + GqlUserGraphQLField, + GqlUserResponseGraphQLField, + GraphQLField, + HcCalibrationGraphQLField, + HcScenarioConfigsPageGraphQLField, + HcWorkPackageGraphQLField, + HcWorkPackagePageGraphQLField, + IngestionJobGraphQLField, + IngestionRunGraphQLField, + IngestorRunPageGraphQLField, + JobSourceGraphQLField, + MachineUserGraphQLField, + MetricGraphQLField, + NetworkModelsGraphQLField, + OpenDssModelPageGraphQLField, + OpportunitiesByYearGraphQLField, + OpportunityGraphQLField, + OpportunityLocationGraphQLField, + PowerFactoryModelGraphQLField, + PowerFactoryModelPageGraphQLField, + PowerFactoryModelTemplateGraphQLField, + PowerFactoryModelTemplatePageGraphQLField, + ProcessedDiffGraphQLField, + ProcessedDiffPageGraphQLField, + SincalGlobalInputsConfigGraphQLField, + SincalModelGraphQLField, + SincalModelPageGraphQLField, + SincalModelPresetGraphQLField, + SincalModelPresetPageGraphQLField, + StudyGraphQLField, + StudyPageGraphQLField, + StudyResultGraphQLField, + UploadUrlResponseGraphQLField, + 
UserCustomerListColumnConfigGraphQLField, + VariantWorkPackageGraphQLField, + WorkPackageTreeGraphQLField, +) diff --git a/src/zepben/eas/lib/generated_graphql_client/exceptions.py b/src/zepben/eas/lib/generated_graphql_client/exceptions.py index e217e9b..5f05996 100644 --- a/src/zepben/eas/lib/generated_graphql_client/exceptions.py +++ b/src/zepben/eas/lib/generated_graphql_client/exceptions.py @@ -1,38 +1,29 @@ from typing import Any, Optional, Union - import httpx - class GraphQLClientError(Exception): """Base exception.""" - class GraphQLClientHttpError(GraphQLClientError): + def __init__(self, status_code: int, response: httpx.Response) -> None: self.status_code = status_code self.response = response def __str__(self) -> str: - return f"HTTP status code: {self.status_code}" - + return f'HTTP status code: {self.status_code}' class GraphQLClientInvalidResponseError(GraphQLClientError): + def __init__(self, response: httpx.Response) -> None: self.response = response def __str__(self) -> str: - return "Invalid response format." - + return 'Invalid response format.' 
class GraphQLClientGraphQLError(GraphQLClientError): - def __init__( - self, - message: str, - locations: Optional[list[dict[str, int]]] = None, - path: Optional[list[str]] = None, - extensions: Optional[dict[str, object]] = None, - original: Optional[dict[str, object]] = None, - ): + + def __init__(self, message: str, locations: Optional[list[dict[str, int]]]=None, path: Optional[list[str]]=None, extensions: Optional[dict[str, object]]=None, original: Optional[dict[str, object]]=None): self.message = message self.locations = locations self.path = path @@ -43,41 +34,26 @@ def __str__(self) -> str: return self.message @classmethod - def from_dict(cls, error: dict[str, Any]) -> "GraphQLClientGraphQLError": - return cls( - message=error["message"], - locations=error.get("locations"), - path=error.get("path"), - extensions=error.get("extensions"), - original=error, - ) - + def from_dict(cls, error: dict[str, Any]) -> 'GraphQLClientGraphQLError': + return cls(message=error['message'], locations=error.get('locations'), path=error.get('path'), extensions=error.get('extensions'), original=error) class GraphQLClientGraphQLMultiError(GraphQLClientError): - def __init__( - self, - errors: list[GraphQLClientGraphQLError], - data: Optional[dict[str, Any]] = None, - ): + + def __init__(self, errors: list[GraphQLClientGraphQLError], data: Optional[dict[str, Any]]=None): self.errors = errors self.data = data def __str__(self) -> str: - return "; ".join(str(e) for e in self.errors) + return '; '.join((str(e) for e in self.errors)) @classmethod - def from_errors_dicts( - cls, errors_dicts: list[dict[str, Any]], data: Optional[dict[str, Any]] = None - ) -> "GraphQLClientGraphQLMultiError": - return cls( - errors=[GraphQLClientGraphQLError.from_dict(e) for e in errors_dicts], - data=data, - ) + def from_errors_dicts(cls, errors_dicts: list[dict[str, Any]], data: Optional[dict[str, Any]]=None) -> 'GraphQLClientGraphQLMultiError': + return 
cls(errors=[GraphQLClientGraphQLError.from_dict(e) for e in errors_dicts], data=data) +class GraphQLClientInvalidMessageFormat(GraphQLClientError): -class GraphQLClientInvalidMessageFormat(GraphQLClientError): # noqa: N818 def __init__(self, message: Union[str, bytes]) -> None: self.message = message def __str__(self) -> str: - return "Invalid message format." + return 'Invalid message format.' \ No newline at end of file From 4aa0275ef73bb9a7c5dbcdde483d81e07a6e115a Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 15:48:43 +1100 Subject: [PATCH 30/32] stuff Signed-off-by: Max Chesterfield --- src/zepben/eas/client/eas_client.py | 20 ++++++++++---------- test/test_integration_testing.py | 8 ++++++-- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py index 565b820..3f41115 100644 --- a/src/zepben/eas/client/eas_client.py +++ b/src/zepben/eas/client/eas_client.py @@ -116,7 +116,7 @@ async def close(self): await self.http_client.aclose() @async_func - async def do_query(self, query: GraphQLQuery[T, R], *additional_fields: R, operation_name: str = None) -> T: + async def query(self, query: GraphQLQuery[T, R], *additional_fields: R, operation_name: str = None) -> T: """Execute a query against the Evolve App Server.""" query = query.fields(*additional_fields) if hasattr(query, "fields") else query return await super().query(query, operation_name=operation_name) @@ -205,7 +205,7 @@ def get_work_package_cost_estimation(self, work_package: WorkPackageInput): :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - return self.do_query( + return self.query( Query.get_work_package_cost_estimation(work_package), ) @@ -247,7 +247,7 @@ def get_hosting_capacity_work_packages_progress(self): # FIXME: 
why is this inf :return: The HTTP response received from the Evolve App Server after requesting work packages progress info """ - return self.do_query( + return self.query( Query.get_active_work_packages(), ) @@ -277,7 +277,7 @@ def get_feeder_load_analysis_report_status(self, report_id: str, full_spec: bool :param full_spec: If true the response will include the request sent to generate the report :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report status """ - return self.do_query( + return self.query( Query.get_feeder_load_analysis_report_status(report_id, full_spec=full_spec).fields( *FeederLoadAnalysisReportFields.all_fields() ), @@ -320,7 +320,7 @@ def get_ingestor_run(self, ingestor_run_id: int): :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ - return self.do_query( + return self.query( Query.get_ingestor_run(ingestor_run_id).fields( *IngestionRunFields.all_fields() ), @@ -342,7 +342,7 @@ def get_ingestor_run_list( :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. 
""" - return self.do_query( + return self.query( Query.list_ingestor_runs(filter_=query_filter, sort=query_sort).fields( IngestionRunFields.id, IngestionRunFields.container_runtime_type, @@ -406,7 +406,7 @@ def get_hosting_capacity_calibration_run(self, id: str): :param id: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return self.do_query( + return self.query( Query.get_calibration_run(id).fields( HcCalibrationFields.id, HcCalibrationFields.name, @@ -430,7 +430,7 @@ def get_hosting_capacity_calibration_sets(self): :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - return self.do_query( + return self.query( Query.get_calibration_sets(), ) @@ -451,7 +451,7 @@ def get_transformer_tap_settings( :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transfomer mrid :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id """ - return self.do_query( + return self.query( Query.get_transformer_tap_settings( calibration_name=calibration_name, feeder=feeder, @@ -499,7 +499,7 @@ def get_paged_opendss_models( :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - return self.do_query( + return self.query( Query.paged_open_dss_models( limit=limit, offset=offset, diff --git a/test/test_integration_testing.py b/test/test_integration_testing.py index a8d4611..9630fde 100644 --- a/test/test_integration_testing.py +++ b/test/test_integration_testing.py @@ -61,6 +61,7 @@ async def test_can_connect_to_local_eas_async_calling_func(): ) )) +@pytest.mark.skip("only displays type hinting in client.query call") @pytest.mark.asyncio async def test_do_things(): client = EasClient( @@ -68,6 +69,9 @@ async def 
test_do_things(): port=7654, asynchronous=True ) - await client.do_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) - await client.do_query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) + try: + await client.query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionRunFields.completed_at, IngestionRunFields.status) + await client.query(Query.list_ingestor_runs(filter_=None, sort=None), IngestionJobFields.application, 1) + except: + pass # my_query(Query.list_ingestor_runs(filter_=None, sort=None)) From a049bc88c77225654e37a24105d52bbc7eb8d436 Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 18:26:51 +1100 Subject: [PATCH 31/32] the stuff Signed-off-by: Max Chesterfield --- src/zepben/eas/__init__.py | 1 - src/zepben/eas/client/decorators.py | 3 +- src/zepben/eas/client/enums.py | 20 -- src/zepben/eas/generate_client.py | 176 ------------------ .../ariadne_plugins/missed_import_checker.py | 15 +- .../lib/generated_graphql_client/__init__.py | 2 + .../async_base_client.py | 10 +- .../base_operation.py | 6 +- .../generated_graphql_client/custom_fields.py | 2 +- .../eas/lib/generated_graphql_client/enums.py | 7 + .../generated_graphql_client/input_types.py | 3 +- 11 files changed, 27 insertions(+), 218 deletions(-) delete mode 100644 src/zepben/eas/client/enums.py delete mode 100644 src/zepben/eas/generate_client.py diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py index 860a1aa..e901cff 100644 --- a/src/zepben/eas/__init__.py +++ b/src/zepben/eas/__init__.py @@ -7,7 +7,6 @@ import warnings from zepben.eas.client.eas_client import * -from zepben.eas.client.enums import * try: from zepben.eas.lib.generated_graphql_client import * diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py index 47c1278..43a4076 100644 --- a/src/zepben/eas/client/decorators.py +++ 
b/src/zepben/eas/client/decorators.py @@ -25,9 +25,10 @@ def wrapper(*args, **kwargs): # Type hinting async_func will break the type hinting. # +# from typing import ParamSpec, TypeVar # P = ParamSpec("P") # R = TypeVar("R") -# def async_func(func: Callable[P, R)) -> Callable[P, R]: +# def async_func(func: Callable[P, R]) -> Callable[P, R]: def async_func(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): diff --git a/src/zepben/eas/client/enums.py b/src/zepben/eas/client/enums.py deleted file mode 100644 index e88816f..0000000 --- a/src/zepben/eas/client/enums.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. - -__all__ = ["OpenDssModelState"] - -from enum import Enum - -__doc__ = """ - This file should ONLY contain enums that the gql generator misses. - Ideally it should be non existent. -""" - -class OpenDssModelState(Enum): - COULD_NOT_START = 'COULD_NOT_START' - CREATION = 'CREATION' - COMPLETED = 'COMPLETED' - FAILED = 'FAILED' diff --git a/src/zepben/eas/generate_client.py b/src/zepben/eas/generate_client.py deleted file mode 100644 index 78044f2..0000000 --- a/src/zepben/eas/generate_client.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2026 Zeppelin Bend Pty Ltd -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-import ast -import sys -from _ast import ImportFrom -from typing import Optional, Callable, Any - -import click -import ariadne_codegen.client_generators.custom_operation -from ariadne_codegen.plugins.manager import PluginManager -from graphql import assert_valid_schema - - -# class ZBPatchedPluginManager(PluginManager): -# def generate_custom_module(self, module: ast.Module, import_adder: Callable[[ImportFrom], Any]) -> ast.Module: -# return self._apply_plugins_on_object( -# "generate_custom_module", -# module, import_adder -# ) -# -# def generate_custom_method(self, method_def: ast.FunctionDef, import_adder: Callable[[ImportFrom], Any]) -> ast.FunctionDef: -# return self._apply_plugins_on_object( -# "generate_custom_method", -# method_def, import_adder -# ) -# -# -# class ZBPatchedCustomOperationGenerator(ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator): -# plugin_manager: ZBPatchedPluginManager -# -# def _generate_method( -# self, operation_name: str, operation_args, final_type, description: Optional[str] = None -# ) -> ast.FunctionDef: -# return self.plugin_manager.generate_custom_method( -# super()._generate_method( -# operation_name, -# operation_args, -# final_type, -# description, -# ), self._add_import -# ) -# -# def generate(self) -> ast.Module: -# return self.plugin_manager.generate_custom_module( -# super().generate(), self._add_import -# ) -# -# ariadne_codegen.client_generators.custom_operation.CustomOperationGenerator = ZBPatchedCustomOperationGenerator - -from ariadne_codegen.client_generators.package import get_package_generator -from ariadne_codegen.config import get_client_settings, get_config_dict, get_graphql_schema_settings -from ariadne_codegen.graphql_schema_generators.schema import ( - generate_graphql_schema_graphql_file, - generate_graphql_schema_python_file, -) -from ariadne_codegen.plugins.explorer import get_plugins_types -from ariadne_codegen.schema import ( - add_mixin_directive_to_schema, - 
filter_fragments_definitions, - filter_operations_definitions, - get_graphql_queries, - get_graphql_schema_from_path, - get_graphql_schema_from_url, -) -from ariadne_codegen.settings import Strategy, get_validation_rule - - -@click.command() -@click.version_option() -@click.option("--config", default=None, help="Path to custom configuration file.") -@click.argument( - "strategy", - default=Strategy.CLIENT.value, - type=click.Choice([e.value for e in Strategy]), - required=False, -) -def main(strategy=Strategy.CLIENT.value, config=None): - config_dict = get_config_dict(config) - if strategy == Strategy.CLIENT: - client(config_dict) - - if strategy == Strategy.GRAPHQL_SCHEMA: - graphql_schema(config_dict) - - -def client(config_dict): - settings = get_client_settings(config_dict) - - if settings.schema_path: - schema = get_graphql_schema_from_path(settings.schema_path) - else: - schema = get_graphql_schema_from_url( - url=settings.remote_schema_url, - headers=settings.remote_schema_headers, - verify_ssl=settings.remote_schema_verify_ssl, - timeout=settings.remote_schema_timeout, - introspection_settings=settings.introspection_settings, - ) - - plugin_manager = PluginManager( - schema=schema, - config_dict=config_dict, - plugins_types=get_plugins_types(settings.plugins), - ) - schema = add_mixin_directive_to_schema(schema) - schema = plugin_manager.process_schema(schema) - assert_valid_schema(schema) - - fragments = [] - queries = [] - if settings.queries_path: - definitions = get_graphql_queries( - settings.queries_path, - schema, - [get_validation_rule(e) for e in settings.skip_validation_rules], - ) - queries = filter_operations_definitions(definitions) - fragments = filter_fragments_definitions(definitions) - - sys.stdout.write(settings.used_settings_message) - - package_generator = get_package_generator( - schema=schema, - fragments=fragments, - settings=settings, - plugin_manager=plugin_manager, - ) - for query in queries: - 
package_generator.add_operation(query) - generated_files = package_generator.generate() - - sys.stdout.write("\nGenerated files:\n " + "\n ".join(generated_files) + "\n") - - -def graphql_schema(config_dict): - settings = get_graphql_schema_settings(config_dict) - - schema = ( - get_graphql_schema_from_path(settings.schema_path) - if settings.schema_path - else get_graphql_schema_from_url( - url=settings.remote_schema_url, - headers=settings.remote_schema_headers, - verify_ssl=settings.remote_schema_verify_ssl, - timeout=settings.remote_schema_timeout, - introspection_settings=settings.introspection_settings, - ) - ) - plugin_manager = PluginManager( - schema=schema, - config_dict=config_dict, - plugins_types=get_plugins_types(settings.plugins), - ) - schema = plugin_manager.process_schema(schema) - assert_valid_schema(schema) - - sys.stdout.write(settings.used_settings_message) - - if settings.target_file_format == "py": - generate_graphql_schema_python_file( - schema=schema, - target_file_path=settings.target_file_path, - type_map_name=settings.type_map_variable_name, - schema_variable_name=settings.schema_variable_name, - ) - else: - generate_graphql_schema_graphql_file( - schema=schema, - target_file_path=settings.target_file_path, - ) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py b/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py index 450e330..8ddd281 100644 --- a/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py +++ b/src/zepben/eas/lib/ariadne_plugins/missed_import_checker.py @@ -8,16 +8,11 @@ class MissedImportCheckerPlugin(Plugin): + """This is masking a problem""" def generate_client_import(self, import_: ast.ImportFrom) -> ast.ImportFrom: - if (iname := import_.names[0].name) in ( - 'SincalFileType', - 'VariantFileType', - 'ContainerType', - 'HostingCapacityFileType', - 'WorkflowStatus', - ): - if import_.module is None: - print(f"[ZBEX] 
Assuming class import {iname} is from module 'enums.py'") - import_.module = 'enums' + # Somethings wrong with how the import is being generated without `module` + if import_.module is None: + print(f"[ZBEX] Assuming class import {import_.names[0].name} is from module 'enums.py'") + import_.module = 'enums' return import_ diff --git a/src/zepben/eas/lib/generated_graphql_client/__init__.py b/src/zepben/eas/lib/generated_graphql_client/__init__.py index 24681c4..a9a688b 100644 --- a/src/zepben/eas/lib/generated_graphql_client/__init__.py +++ b/src/zepben/eas/lib/generated_graphql_client/__init__.py @@ -18,6 +18,7 @@ IngestorRuntimeKind, InterventionClass, MeasurementZoneType, + OpenDssModelState, OpportunitiesNeed, OpportunitiesType, SectionType, @@ -189,6 +190,7 @@ "OpenDssModelGenerationSpecInput", "OpenDssModelInput", "OpenDssModelOptionsInput", + "OpenDssModelState", "OpenDssModulesConfigInput", "OpportunitiesNeed", "OpportunitiesType", diff --git a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py index 3f23742..7991c6e 100644 --- a/src/zepben/eas/lib/generated_graphql_client/async_base_client.py +++ b/src/zepben/eas/lib/generated_graphql_client/async_base_client.py @@ -57,7 +57,7 @@ async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> await self.http_client.aclose() async def execute(self, query: str, operation_name: Optional[str]=None, variables: Optional[dict[str, Any]]=None, **kwargs: Any) -> httpx.Response: - processed_variables, files, files_map = self._process_variables(variables) + (processed_variables, files, files_map) = self._process_variables(variables) if files and files_map: return await self._execute_multipart(query=query, operation_name=operation_name, variables=processed_variables, files=files, files_map=files_map, **kwargs) return await self._execute_json(query=query, operation_name=operation_name, variables=processed_variables, 
**kwargs) @@ -103,7 +103,7 @@ def _process_variables(self, variables: Optional[dict[str, Any]]) -> tuple[dict[ return self._get_files_from_variables(serializable_variables) def _convert_dict_to_json_serializable(self, dict_: dict[str, Any]) -> dict[str, Any]: - return {key: self._convert_value(value) for key, value in dict_.items() if value is not UNSET} + return {key: self._convert_value(value) for (key, value) in dict_.items() if value is not UNSET} def _convert_value(self, value: Any) -> Any: if isinstance(value, BaseModel): @@ -119,13 +119,13 @@ def _get_files_from_variables(self, variables: dict[str, Any]) -> tuple[dict[str def separate_files(path: str, obj: Any) -> Any: if isinstance(obj, list): nulled_list = [] - for index, value in enumerate(obj): + for (index, value) in enumerate(obj): value = separate_files(f'{path}.{index}', value) nulled_list.append(value) return nulled_list if isinstance(obj, dict): nulled_dict = {} - for key, value in obj.items(): + for (key, value) in obj.items(): value = separate_files(f'{path}.{key}', value) nulled_dict[key] = value return nulled_dict @@ -140,7 +140,7 @@ def separate_files(path: str, obj: Any) -> Any: return None return obj nulled_variables = separate_files('variables', variables) - files: dict[str, tuple[str, IO[bytes], str]] = {str(i): (file_.filename, cast(IO[bytes], file_.content), file_.content_type) for i, file_ in enumerate(files_list)} + files: dict[str, tuple[str, IO[bytes], str]] = {str(i): (file_.filename, cast(IO[bytes], file_.content), file_.content_type) for (i, file_) in enumerate(files_list)} return (nulled_variables, files, files_map) async def _execute_multipart(self, query: str, operation_name: Optional[str], variables: dict[str, Any], files: dict[str, tuple[str, IO[bytes], str]], files_map: dict[str, list[str]], **kwargs: Any) -> httpx.Response: diff --git a/src/zepben/eas/lib/generated_graphql_client/base_operation.py b/src/zepben/eas/lib/generated_graphql_client/base_operation.py index 
ba2db36..3219e1f 100644 --- a/src/zepben/eas/lib/generated_graphql_client/base_operation.py +++ b/src/zepben/eas/lib/generated_graphql_client/base_operation.py @@ -45,7 +45,7 @@ def _build_selections(self, idx: int, used_names: set[str]) -> list[Union[FieldN """Builds the selection set for the current GraphQL field, including subfields and inline fragments.""" selections: list[Union[FieldNode, InlineFragmentNode]] = [subfield.to_ast(idx, used_names) for subfield in self._subfields] - for name, subfields in self._inline_fragments.items(): + for (name, subfields) in self._inline_fragments.items(): selections.append(InlineFragmentNode(type_condition=NamedTypeNode(name=NameNode(value=name)), selection_set=SelectionSetNode(selections=[subfield.to_ast(idx, used_names) for subfield in subfields]))) return selections @@ -67,7 +67,7 @@ def _collect_all_variables(self, idx: int, used_names: set[str]) -> None: ensuring unique names. """ self.formatted_variables = {} - for k, v in self._variables.items(): + for (k, v) in self._variables.items(): unique_name = self._format_variable_name(idx, k, used_names) self.formatted_variables[unique_name] = {'name': k, 'type': v['type'], 'value': v['value']} @@ -76,7 +76,7 @@ def to_ast(self, idx: int, used_names: Optional[set[str]]=None) -> FieldNode: if used_names is None: used_names = set() self._collect_all_variables(idx, used_names) - return FieldNode(name=NameNode(value=self._build_field_name()), arguments=[GraphQLArgument(v['name'], k).to_ast() for k, v in self.formatted_variables.items()], selection_set=SelectionSetNode(selections=self._build_selections(idx, used_names)) if self._subfields or self._inline_fragments else None) + return FieldNode(name=NameNode(value=self._build_field_name()), arguments=[GraphQLArgument(v['name'], k).to_ast() for (k, v) in self.formatted_variables.items()], selection_set=SelectionSetNode(selections=self._build_selections(idx, used_names)) if self._subfields or self._inline_fragments else None) def 
get_formatted_variables(self) -> dict[str, dict[str, Any]]: """ diff --git a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py index d7bb87b..2b6873c 100644 --- a/src/zepben/eas/lib/generated_graphql_client/custom_fields.py +++ b/src/zepben/eas/lib/generated_graphql_client/custom_fields.py @@ -1,6 +1,5 @@ from typing import Any, Optional, Union -from . import SerializationType from .base_operation import GraphQLField from .custom_typing_fields import ( AppOptionsGraphQLField, @@ -75,6 +74,7 @@ WorkPackageProgressDetailsGraphQLField, WorkPackageTreeGraphQLField, ) +from .enums import SerializationType class AppOptionsFields(GraphQLField): diff --git a/src/zepben/eas/lib/generated_graphql_client/enums.py b/src/zepben/eas/lib/generated_graphql_client/enums.py index 5a153a4..7998246 100644 --- a/src/zepben/eas/lib/generated_graphql_client/enums.py +++ b/src/zepben/eas/lib/generated_graphql_client/enums.py @@ -139,6 +139,13 @@ class MeasurementZoneType(str, Enum): CALIBRATION = "CALIBRATION" +class OpenDssModelState(str, Enum): + COULD_NOT_START = "COULD_NOT_START" + CREATION = "CREATION" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + class OpportunitiesNeed(str, Enum): EXPORTDECREASE = "EXPORTDECREASE" EXPORTINCREASE = "EXPORTINCREASE" diff --git a/src/zepben/eas/lib/generated_graphql_client/input_types.py b/src/zepben/eas/lib/generated_graphql_client/input_types.py index be71ecd..915dbd9 100644 --- a/src/zepben/eas/lib/generated_graphql_client/input_types.py +++ b/src/zepben/eas/lib/generated_graphql_client/input_types.py @@ -13,6 +13,7 @@ IngestorRunState, IngestorRuntimeKind, InterventionClass, + OpenDssModelState, SectionType, SortOrder, ) @@ -195,7 +196,7 @@ class GeoJsonOverlayInput(BaseModel): class GetOpenDssModelsFilterInput(BaseModel): is_public: Optional[bool] = Field(alias="isPublic", default=None) name: Optional[str] = None - state: Optional[list[str]] = None + state: 
Optional[list[OpenDssModelState]] = None class GetOpenDssModelsSortCriteriaInput(BaseModel): From 5e65d8d1b2166a93f3390138a7f312e7cd8b48fd Mon Sep 17 00:00:00 2001 From: Max Chesterfield Date: Wed, 1 Apr 2026 18:46:42 +1100 Subject: [PATCH 32/32] changelog and readme Signed-off-by: Max Chesterfield --- README.md | 2 +- changelog.md | 24 ++++++++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b0f6b21..3b0bb17 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,7 @@ This will enable all `deprecated` and `opt_in` methods on the class, they are di To regenerate the graphql client you will need to install `zepben.eas` with `eas-codegen` optional dependencies: ```shell -pip install zepben.eas[eas-codegen] +pip install -e ".[eas-codegen]" ``` With these installed and EAS running locally on port 7654, you can then generate the client: diff --git a/changelog.md b/changelog.md index 26dc60f..d32d684 100644 --- a/changelog.md +++ b/changelog.md @@ -1,13 +1,33 @@ # EAS Python client ## [0.30.0] - UNRELEASED ### Breaking Changes -* None. +* Deprecated methods in EasClient: + * aclose + * get_work_package_cost_estimation + * run_hosting_capacity_work_package + * cancel_hosting_capacity_work_package + * get_hosting_capacity_work_packages_progress + * run_feeder_load_analysis_report + * get_feeder_load_analysis_report_status + * upload_study + * run_ingestor + * get_ingestor_run + * get_ingestor_run_list + * run_hosting_capacity_calibration + * get_hosting_capacity_calibration_run + * get_hosting_capacity_calibration_sets + * get_transformer_tap_settings + * run_opendss_export + * get_paged_opendss_models + * get_opendss_model +* Most Input object will need to be migrated over to the new data model ### New Features * None. ### Enhancements -* None. +* EasClient has new `query` and `mutation` methods that will accept `Query` and `Mutation` objects respectively. 
+ * Available queries and mutations can be found as `@classmethods` on `Queries` and `Mutations`. ### Fixes * None.