diff --git a/proxlb/models/balancing.py b/proxlb/models/balancing.py
index 6abae0c..a2ed049 100644
--- a/proxlb/models/balancing.py
+++ b/proxlb/models/balancing.py
@@ -175,7 +175,7 @@ def exec_rebalancing_vm(self, proxmox_api: ProxmoxApi, proxlb_data: ProxLbData,
         try:
             logger.info(f"Balancing: Starting to migrate VM guest {guest_name} from {guest_node_current} to {guest_node_target}.")
-            job_id = proxmox_api.nodes(guest_node_current).qemu(guest_id).migrate().post(**migration_options)
+            job_id = proxmox_api.nodes(guest_node_current).qemu(guest_id).migrate.post.model(**migration_options)
         except proxmoxer.core.ResourceException as proxmox_api_error:
             logger.critical(f"Balancing: Failed to migrate guest {guest_name} of type VM due to some Proxmox errors. Please check if resource is locked or similar.")
             logger.debug(f"Balancing: Failed to migrate guest {guest_name} of type VM due to some Proxmox errors: {proxmox_api_error}")
@@ -206,7 +206,7 @@ def exec_rebalancing_ct(self, proxmox_api: ProxmoxApi, proxlb_data: ProxLbData,
         try:
             logger.info(f"Balancing: Starting to migrate CT guest {guest_name} from {guest_node_current} to {guest_node_target}.")
-            job_id = proxmox_api.nodes(guest_node_current).lxc(guest_id).migrate().post(target=guest_node_target, restart=1)
+            job_id = proxmox_api.nodes(guest_node_current).lxc(guest_id).migrate.post(target=guest_node_target, restart=1)
         except proxmoxer.core.ResourceException as proxmox_api_error:
             logger.critical(f"Balancing: Failed to migrate guest {guest_name} of type CT due to some Proxmox errors. Please check if resource is locked or similar.")
             logger.debug(f"Balancing: Failed to migrate guest {guest_name} of type CT due to some Proxmox errors: {proxmox_api_error}")
@@ -230,25 +230,28 @@ def get_rebalancing_job_status(self, proxmox_api: ProxmoxApi, proxlb_data: ProxL
         bool: True if the job completed successfully, False otherwise.
         """
         logger.debug("Starting: get_rebalancing_job_status.")
-        job = proxmox_api.nodes(guest_current_node).tasks(job_id).status().get()
+        job = proxmox_api.nodes(guest_current_node).tasks(job_id).status.get.model()
+
+        job_status: str | None = job.status

         # Fetch actual migration job status if this got spawned by a HA job
-        if job["type"] == "hamigrate":
+        if job.type == "hamigrate":
             logger.debug(f"Balancing: Job ID {job_id} (guest: {guest_name}) is a HA migration job. Fetching underlying migration job...")
             time.sleep(1)
-            vm_id = int(job["id"])
-            qm_migrate_jobs = proxmox_api.nodes(guest_current_node).tasks.get(typefilter="qmigrate", vmid=vm_id, start=0, source="active", limit=1)
+            vm_id = int(job.id)
+            qm_migrate_jobs = proxmox_api.nodes(guest_current_node).tasks.get.model(typefilter="qmigrate", vmid=vm_id, start=0, source="active", limit=1)
             if len(qm_migrate_jobs) > 0:
-                job = qm_migrate_jobs[0]
-                job_id = job["upid"]
-                logger.debug(f'Overwriting job polling for: ID {job_id} (guest: {guest_name}) by {job}')
+                qmjob = qm_migrate_jobs[0]
+                job_id = qmjob.upid
+                job_status = qmjob.status
+                logger.debug(f'Overwriting job polling for: ID {job_id} (guest: {guest_name}) by {qmjob}')
             else:
                 logger.debug(f"Balancing: Job ID {job_id} (guest: {guest_name}) is a standard migration job. Proceeding with status check.")

         # Watch job id until it finalizes
         # Note: Unsaved jobs are delivered in uppercase from Proxmox API
-        if job.get("status", "").lower() == "running":
+        if job_status and job_status.lower() == "running":
             # Do not hammer the API while
             # watching the job status
             time.sleep(10)
@@ -264,9 +267,9 @@ def get_rebalancing_job_status(self, proxmox_api: ProxmoxApi, proxlb_data: ProxL
             return False

         # Validate job output for errors when finished
-        if job["status"] == "stopped":
+        if job_status == "stopped":
-            if job["exitstatus"] == "OK":
+            if job.exitstatus == "OK":
                 logger.debug(f"Balancing: Job ID {job_id} (guest: {guest_name}) was successful.")
                 logger.debug("Finished: get_rebalancing_job_status.")
                 return True
diff --git a/proxlb/models/guests.py b/proxlb/models/guests.py
index d00cf50..ff6c2e2 100644
--- a/proxlb/models/guests.py
+++ b/proxlb/models/guests.py
@@ -71,43 +71,49 @@ def get_guests(proxmox_api: ProxmoxApi, pools: Dict[str, ProxLbData.Pool], ha_ru
     # VM objects: Iterate over all VMs on the current node by the qemu API object.
     # Unlike the nodes, we need to keep them even when being ignored to create proper
     # resource metrics for rebalancing to ensure that we do not overprovision the node.
-    for guest in proxmox_api.nodes(node).qemu.get():
-        if guest['status'] == 'running':
+    for qemu_guest in proxmox_api.nodes(node).qemu.get.model():
+        assert qemu_guest.cpus is not None
+        assert qemu_guest.disk is not None
+        assert qemu_guest.maxdisk is not None
+        assert qemu_guest.maxmem is not None
+        assert qemu_guest.mem is not None
+        assert qemu_guest.name is not None
+        if qemu_guest.status == 'running':
-            guest_tags = Tags.get_tags_from_guests(proxmox_api, node, guest['vmid'], GuestType.Vm)
-            guest_pools = Pools.get_pools_for_guest(guest['name'], pools)
-            guest_ha_rules = HaRules.get_ha_rules_for_guest(guest['name'], ha_rules, guest['vmid'])
+            guest_tags = Tags.get_tags_from_guests(proxmox_api, node, qemu_guest.vmid, GuestType.Vm)
+            guest_pools = Pools.get_pools_for_guest(qemu_guest.name, pools)
+            guest_ha_rules = HaRules.get_ha_rules_for_guest(qemu_guest.name, ha_rules, qemu_guest.vmid)
-            guests[guest['name']] = ProxLbData.Guest(
-                name=guest['name'],
+            guests[qemu_guest.name] = ProxLbData.Guest(
+                name=qemu_guest.name,
                 cpu=ProxLbData.Guest.Metric(
-                    total=int(guest['cpus']),
-                    used=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', None),
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'full', spikes=True),
+                    total=int(qemu_guest.cpus),
+                    used=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'cpu', None),
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'cpu', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'cpu', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'cpu', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'cpu', 'full', spikes=True),
                     pressure_hot=False,
                 ),
                 disk=ProxLbData.Guest.Metric(
-                    total=guest['maxdisk'],
-                    used=guest['disk'],
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'full', spikes=True),
+                    total=qemu_guest.maxdisk,
+                    used=qemu_guest.disk,
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'disk', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'disk', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'disk', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'disk', 'full', spikes=True),
                     pressure_hot=False,
                 ),
                 memory=ProxLbData.Guest.Metric(
-                    total=guest['maxmem'],
-                    used=guest['mem'],
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'full', spikes=True),
+                    total=qemu_guest.maxmem,
+                    used=qemu_guest.mem,
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'memory', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'memory', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'memory', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, qemu_guest.vmid, qemu_guest.name, 'memory', 'full', spikes=True),
                     pressure_hot=False,
                 ),
-                id=guest['vmid'],
+                id=qemu_guest.vmid,
                 node_current=node,
                 node_target=node,
                 processed=False,
@@ -123,50 +129,56 @@ def get_guests(proxmox_api: ProxmoxApi, pools: Dict[str, ProxLbData.Pool], ha_ru
                 type=GuestType.Vm,
             )
-            logger.debug(f"Resources of Guest {guest['name']} (type VM) added: {guests[guest['name']]}")
+            logger.debug(f"Resources of Guest {qemu_guest.name} (type VM) added: {guests[qemu_guest.name]}")
         else:
-            logger.debug(f'Metric for VM {guest["name"]} ignored because VM is not running.')
+            logger.debug(f'Metric for VM {qemu_guest.name} ignored because VM is not running.')

     # CT objects: Iterate over all CTs on the current node by the lxc API object.
     # Unlike the nodes, we need to keep them even when being ignored to create proper
     # resource metrics for rebalancing to ensure that we do not overprovision the node.
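+    # NOTE: the typed models expose these metrics as Optional, so the asserts below narrow them before use.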
-    for guest in proxmox_api.nodes(node).lxc.get():
-        if guest['status'] == 'running':
+    for lxc_guest in proxmox_api.nodes(node).lxc.get.model():
+        assert lxc_guest.cpus is not None
+        assert lxc_guest.disk is not None
+        assert lxc_guest.maxdisk is not None
+        assert lxc_guest.maxmem is not None
+        assert lxc_guest.mem is not None
+        assert lxc_guest.name is not None
+        if lxc_guest.status == 'running':
-            guest_tags = Tags.get_tags_from_guests(proxmox_api, node, guest['vmid'], GuestType.Ct)
-            guest_pools = Pools.get_pools_for_guest(guest['name'], pools)
-            guest_ha_rules = HaRules.get_ha_rules_for_guest(guest['name'], ha_rules, guest['vmid'])
+            guest_tags = Tags.get_tags_from_guests(proxmox_api, node, lxc_guest.vmid, GuestType.Ct)
+            guest_pools = Pools.get_pools_for_guest(lxc_guest.name, pools)
+            guest_ha_rules = HaRules.get_ha_rules_for_guest(lxc_guest.name, ha_rules, lxc_guest.vmid)
-            guests[guest['name']] = ProxLbData.Guest(
+            guests[lxc_guest.name] = ProxLbData.Guest(
                 cpu=ProxLbData.Guest.Metric(
-                    total=int(guest['cpus']),
-                    used=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', None),
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'cpu', 'full', spikes=True),
+                    total=int(lxc_guest.cpus),
+                    used=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'cpu', None),
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'cpu', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'cpu', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'cpu', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'cpu', 'full', spikes=True),
                     pressure_hot=False,
                 ),
                 disk=ProxLbData.Guest.Metric(
-                    total=guest['maxdisk'],
-                    used=guest['disk'],
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'disk', 'full', spikes=True),
+                    total=lxc_guest.maxdisk,
+                    used=lxc_guest.disk,
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'disk', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'disk', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'disk', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'disk', 'full', spikes=True),
                     pressure_hot=False,
                 ),
                 memory=ProxLbData.Guest.Metric(
-                    total=guest['maxmem'],
-                    used=guest['mem'],
-                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'some'),
-                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'full'),
-                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'some', spikes=True),
-                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, guest['vmid'], guest['name'], 'memory', 'full', spikes=True),
+                    total=lxc_guest.maxmem,
+                    used=lxc_guest.mem,
+                    pressure_some_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'memory', 'some'),
+                    pressure_full_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'memory', 'full'),
+                    pressure_some_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'memory', 'some', spikes=True),
+                    pressure_full_spikes_percent=Guests.get_guest_rrd_data(proxmox_api, node, lxc_guest.vmid, lxc_guest.name, 'memory', 'full', spikes=True),
                     pressure_hot=False,
                 ),
-                name=guest['name'],
-                id=guest['vmid'],
+                name=lxc_guest.name,
+                id=lxc_guest.vmid,
                 node_current=node,
                 node_target=node,
                 processed=False,
@@ -182,9 +194,9 @@ def get_guests(proxmox_api: ProxmoxApi, pools: Dict[str, ProxLbData.Pool], ha_ru
                 type=GuestType.Ct,
             )
-            logger.debug(f"Resources of Guest {guest['name']} (type CT) added: {guests[guest['name']]}")
+            logger.debug(f"Resources of Guest {lxc_guest.name} (type CT) added: {guests[lxc_guest.name]}")
         else:
-            logger.debug(f'Metric for CT {guest["name"]} ignored because CT is not running.')
+            logger.debug(f'Metric for CT {lxc_guest.name} ignored because CT is not running.')

     logger.debug("Finished: get_guests.")
     return guests
@@ -228,7 +240,7 @@ def get_guest_rrd_data(proxmox_api: ProxmoxApi, node_name: str, vm_id: int, vm_n
         # RRD data is collected every minute, so we look at the last 6 entries
         # and take the maximum value to represent the spike
         logger.debug(f"Getting RRD data (spike: {spikes}) of pressure for {object_name} {object_type} from guest: {vm_name}.")
-        _rrd_data_value = [row.get(lookup_key) for row in guest_data_rrd if row.get(lookup_key) is not None]
+        _rrd_data_value = [row[lookup_key] for row in guest_data_rrd if lookup_key in row and row[lookup_key] is not None]
         rrd_data_value = max(_rrd_data_value[-6:], default=0.0)
     else:
         # Calculate the average value from the RRD data entries
diff --git a/proxlb/models/ha_rules.py b/proxlb/models/ha_rules.py
index 98472fb..9f350be 100644
--- a/proxlb/models/ha_rules.py
+++ b/proxlb/models/ha_rules.py
@@ -68,38 +68,42 @@ def get_ha_rules(proxmox_api: ProxmoxApi, meta: ProxLbData.Meta) -> Dict[str, Pr
     else:
         logger.debug("Cluster running Proxmox VE 9 or newer, proceeding with HA rule retrieval.")

-    for rule in proxmox_api.cluster.ha.rules.get():
+    def assert_str(obj: str | None) -> str:
+        assert isinstance(obj, str)
+        return obj
+
+    for rule in proxmox_api.cluster.ha.rules.get.model():
         # Skip disabled rules (disable key exists AND is truthy)
-        if rule.get("disable", 0):
-            logger.debug(f"Skipping ha-rule: {rule['rule']} of type {rule['type']} affecting guests: {rule['resources']}. Rule is disabled.")
+        if rule.disable:
+            logger.debug(f"Skipping ha-rule: {rule.rule} of type {rule.type} affecting guests: {rule.resources}. Rule is disabled.")
            continue

         # Create a resource list containing the VM and CT IDs that are part of
         # this HA rule by splitting on commas and stripping whitespace
-        resources_list_guests = [int(r.split(":")[1]) for r in rule["resources"].split(",") if r.strip()]
+        resources_list_guests = [int(r.split(":")[1]) for r in assert_str(rule.resources).split(",") if r.strip()]

         # Convert the affinity field to a more descriptive type
         affinity_type: AffinityType
-        if rule.get("affinity", None) == "negative":
+        if rule.affinity == "negative":
             affinity_type = AffinityType.NegativeAffinity
         else:
             affinity_type = AffinityType.PositiveAffinity

         # Create affected nodes list
         resources_list_nodes = []
-        if rule.get("nodes", None):
-            resources_list_nodes = [n for n in rule["nodes"].split(",") if n]
+        if rule.nodes:
+            resources_list_nodes = [n for n in rule.nodes.split(",") if n]

         # Create the ha_rule element
-        ha_rules[rule['rule']] = ProxLbData.HaRule(
-            rule=rule['rule'],
+        ha_rules[rule.rule] = ProxLbData.HaRule(
+            rule=rule.rule,
             type=affinity_type,
             nodes=resources_list_nodes,
             members=resources_list_guests,
         )
-        logger.debug(f"Got ha-rule: {rule['rule']} as type {affinity_type} affecting guests: {rule['resources']}")
+        logger.debug(f"Got ha-rule: {rule.rule} as type {affinity_type} affecting guests: {rule.resources}")

     logger.debug("Finished: ha_rules.")
     return ha_rules
diff --git a/proxlb/models/nodes.py b/proxlb/models/nodes.py
index 7f8e59b..5ada652 100644
--- a/proxlb/models/nodes.py
+++ b/proxlb/models/nodes.py
@@ -62,70 +62,76 @@ def get_nodes(proxmox_api: ProxmoxApi, proxlb_config: Config) -> Dict[str, ProxL
     logger.debug("Starting: get_nodes.")
     nodes: dict[str, ProxLbData.Node] = {}

-    for node in proxmox_api.nodes.get():
+    for node in proxmox_api.nodes.get.model():
         # Ignoring a node results in ignoring all placed guests on the ignored node!
-        if node["status"] == "online" and not Nodes.set_node_ignore(proxlb_config, node["node"]):
-
-            cpu_used = node["cpu"] * node["maxcpu"]
+        if node.status == "online" and not Nodes.set_node_ignore(proxlb_config, node.node):
+            assert node.cpu is not None
+            assert node.maxcpu is not None
+            assert node.disk is not None
+            assert node.maxdisk is not None
+            assert node.mem is not None
+            assert node.maxmem is not None
+
+            cpu_used = node.cpu * node.maxcpu
             # FIXME: This formula makes cpu_free negative??
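+            # NOTE: the API reports "cpu" as a load fraction (0..1), so the formula below works out to maxcpu * (1 - cpu) and should stay non-negative.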
-            cpu_free = (node["maxcpu"]) - (node["cpu"] * node["maxcpu"])
-            disk_free = node["maxdisk"] - node["disk"]
-            disk_used = node["disk"]
-            memory_used = node["mem"]
-            memory_free = node["maxmem"] - node["mem"]
-
-            nodes[node["node"]] = ProxLbData.Node(
-                name=node["node"],
-                pve_version=Nodes.get_node_pve_version(proxmox_api, node["node"]),
+            cpu_free = (node.maxcpu) - (node.cpu * node.maxcpu)
+            disk_free = node.maxdisk - node.disk
+            disk_used = node.disk
+            memory_used = node.mem
+            memory_free = node.maxmem - node.mem
+
+            nodes[node.node] = ProxLbData.Node(
+                name=node.node,
+                pve_version=Nodes.get_node_pve_version(proxmox_api, node.node),
                 pressure_hot=False,
                 maintenance=False,
                 cpu=ProxLbData.Node.Metric(
-                    total=node["maxcpu"],
+                    total=node.maxcpu,
                     assigned=0,
                     used=cpu_used,
                     free=cpu_free,
                     assigned_percent=0,
-                    free_percent=cpu_free / node["maxcpu"] * 100,
-                    used_percent=cpu_used / node["maxcpu"] * 100,
-                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "cpu", "some"),
-                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "cpu", "full"),
-                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "cpu", "some", spikes=True),
-                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "cpu", "full", spikes=True),
+                    free_percent=cpu_free / node.maxcpu * 100,
+                    used_percent=cpu_used / node.maxcpu * 100,
+                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "cpu", "some"),
+                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "cpu", "full"),
+                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "cpu", "some", spikes=True),
+                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "cpu", "full", spikes=True),
                     pressure_hot=False,
                 ),
                 disk=ProxLbData.Node.Metric(
-                    total=node["maxdisk"],
+                    total=node.maxdisk,
                     assigned=0,
-                    used=node["disk"],
+                    used=node.disk,
                     free=disk_free,
                     assigned_percent=0,
-                    free_percent=disk_free / node["maxdisk"] * 100,
-                    used_percent=disk_used / node["maxdisk"] * 100,
-                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "disk", "some"),
-                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "disk", "full"),
-                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "disk", "some", spikes=True),
-                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "disk", "full", spikes=True),
+                    free_percent=disk_free / node.maxdisk * 100,
+                    used_percent=disk_used / node.maxdisk * 100,
+                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "disk", "some"),
+                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "disk", "full"),
+                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "disk", "some", spikes=True),
+                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "disk", "full", spikes=True),
                     pressure_hot=False,
                 ),
                 memory=ProxLbData.Node.Metric(
-                    total=Nodes.set_node_resource_reservation(node["node"], node["maxmem"], proxlb_config, BalancingResource.Memory),
+                    total=Nodes.set_node_resource_reservation(node.node, node.maxmem, proxlb_config, BalancingResource.Memory),
                     assigned=0,
                     used=memory_used,
                     free=memory_free,
                     assigned_percent=0,
-                    free_percent=memory_free / node["maxmem"] * 100,
-                    used_percent=memory_used / node["maxmem"] * 100,
-                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "memory", "some"),
-                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "memory", "full"),
-                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "memory", "some", spikes=True),
-                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node["node"], "memory", "full", spikes=True),
+                    free_percent=memory_free / node.maxmem * 100,
+                    used_percent=memory_used / node.maxmem * 100,
+                    pressure_some_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "memory", "some"),
+                    pressure_full_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "memory", "full"),
+                    pressure_some_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "memory", "some", spikes=True),
+                    pressure_full_spikes_percent=Nodes.get_node_rrd_data(proxmox_api, node.node, "memory", "full", spikes=True),
                     pressure_hot=False,
                 ),
             )

             # Evaluate if node should be set to maintenance mode
-            if Nodes.set_node_maintenance(proxmox_api, proxlb_config, node["node"]):
-                nodes[node["node"]].maintenance = True
+            if Nodes.set_node_maintenance(proxmox_api, proxlb_config, node.node):
+                nodes[node.node].maintenance = True

     logger.debug(f"Node metrics collected: {nodes}")
     logger.debug("Finished: get_nodes.")
@@ -157,10 +163,10 @@ def set_node_maintenance(proxmox_api: ProxmoxApi, proxlb_config: Config, node_na
        logger.debug(f"Node: {node_name} is not in maintenance mode by ProxLB config.")

     # Evaluate maintenance mode by Proxmox HA
-    for ha_element in proxmox_api.cluster.ha.status.current.get():
-        if ha_element.get("status"):
-            if "maintenance mode" in ha_element.get("status"):
-                if ha_element.get("node") == node_name:
+    for ha_element in proxmox_api.cluster.ha.status.current.get.model():
+        if ha_element.status:
+            if "maintenance mode" in ha_element.status:
+                if ha_element.node == node_name:
                     logger.info(f"Node: {node_name} has been set to maintenance mode (by Proxmox HA API).")
                     return True
                 else:
@@ -231,7 +237,7 @@ def get_node_rrd_data(proxmox_api: ProxmoxApi, node_name: str, object_name: str,
     if spikes:
         # RRD data is collected every minute, so we look at the last 6 entries
         # and take the maximum value to represent the spike
-        _rrd_data_value = [row.get(lookup_key) for row in node_data_rrd if row.get(lookup_key) is not None]
+        _rrd_data_value = [row[lookup_key] for row in node_data_rrd if lookup_key in row and row[lookup_key] is not None]
         rrd_data_value = max(_rrd_data_value[-6:], default=0.0)
     else:
         # Calculate the average value from the RRD data entries
diff --git a/proxlb/models/pools.py b/proxlb/models/pools.py
index ab43a88..e4c7f49 100644
--- a/proxlb/models/pools.py
+++ b/proxlb/models/pools.py
@@ -60,26 +60,26 @@ def get_pools(proxmox_api: ProxmoxApi) -> Dict[str, ProxLbData.Pool]:
     # Pool objects: iterate over all pools in the cluster.
     # We keep pool members even if their nodes are ignored so resource accounting
     # for rebalancing remains correct and we avoid overprovisioning nodes.
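+    # NOTE: with the typed models, pool fields become attributes (pool.poolid) instead of dict keys, matching the node and guest loops above.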
-    for pool in proxmox_api.pools.get():
-        logger.debug(f"Got pool: {pool['poolid']}")
-        pools[pool['poolid']] = ProxLbData.Pool(name=pool['poolid'])
+    for pool in proxmox_api.pools.get.model():
+        logger.debug(f"Got pool: {pool.poolid}")
+        pools[pool.poolid] = ProxLbData.Pool(name=pool.poolid)

         # Fetch pool details and collect member names
         try:
-            pool_details = proxmox_api.pools(pool['poolid']).get()
+            pool_details = proxmox_api.pools(pool.poolid).get.model()
         except Exception as e:
-            logger.error(f"Error fetching pool details for pool {pool['poolid']}: {e}")
+            logger.error(f"Error fetching pool details for pool {pool.poolid}: {e}")
             continue

-        for member in pool_details.get("members", []):
+        for member in pool_details.members:
             # We might also have objects without the key "name", e.g. storage pools
-            if "name" not in member:
-                logger.debug(f"Skipping member without name in pool: {pool['poolid']}")
+            if not member.name:
+                logger.debug(f"Skipping member without name in pool: {pool.poolid}")
                 continue

-            logger.debug(f"Got member: {member['name']} for pool: {pool['poolid']}")
-            pools[pool['poolid']].members.append(member['name'])
+            logger.debug(f"Got member: {member.name} for pool: {pool.poolid}")
+            pools[pool.poolid].members.append(member.name)

     logger.debug("Finished: get_pools.")
     return pools
diff --git a/proxlb/models/tags.py b/proxlb/models/tags.py
index a8820b4..a7a8ab3 100644
--- a/proxlb/models/tags.py
+++ b/proxlb/models/tags.py
@@ -73,11 +73,11 @@ def get_tags_from_guests(proxmox_api: ProxmoxApi, node: str, guest_id: int, gues
     time.sleep(0.1)
     api_tags: str
     if guest_type == GuestType.Vm:
-        guest_config = proxmox_api.nodes(node).qemu(guest_id).config.get()
-        api_tags = guest_config.get("tags", "")
+        qemu_guest_config = proxmox_api.nodes(node).qemu(guest_id).config.get.model()
+        api_tags = qemu_guest_config.tags or ""
     elif guest_type == GuestType.Ct:
-        guest_config = proxmox_api.nodes(node).lxc(guest_id).config.get()
-        api_tags = guest_config.get("tags", "")
+        lxc_guest_config = proxmox_api.nodes(node).lxc(guest_id).config.get.model()
+        api_tags = lxc_guest_config.tags or ""
     else:
         assert_never(guest_type)
diff --git a/proxlb/utils/proxmox_api.py b/proxlb/utils/proxmox_api.py
index 94d3b1a..418ad47 100644
--- a/proxlb/utils/proxmox_api.py
+++ b/proxlb/utils/proxmox_api.py
@@ -16,6 +16,7 @@ import errno
 try:
     import proxmoxer
+    import proxmoxer_types.v9
     import proxmoxer.backends.https
     PROXMOXER_PRESENT = True
 except ImportError:
@@ -34,7 +35,7 @@
     URLLIB3_PRESENT = True
 except ImportError:
     URLLIB3_PRESENT = False

-from typing import Any, Optional
+from typing import Optional
 from .helper import Helper
 from .logger import SystemdLogger
 from .config_parser import Config
@@ -54,6 +55,7 @@
     sys.exit(1)

 import proxmoxer
+import proxmoxer_types.v9  # noqa: F811  # keep for pyright
 import requests  # noqa: F811  # keep for pyright
 import urllib3  # noqa: F811  # keep for pyright
@@ -115,11 +117,17 @@ def __init__(self, proxlb_config: Config) -> None:
         self.test_api_user_permissions(self.proxmox_api)
         logger.debug("Finished: ProxmoxApi initialization.")

-    def __getattr__(self, name: str) -> Any:
-        """
-        Delegate attribute access to proxmox_api to the underlying proxmoxer module.
- """ - return getattr(self.proxmox_api, name) + @property + def cluster(self) -> "proxmoxer_types.v9.ProxmoxAPI.Cluster": + return self.proxmox_api.cluster + + @property + def nodes(self) -> "proxmoxer_types.v9.ProxmoxAPI.Nodes": + return self.proxmox_api.nodes + + @property + def pools(self) -> "proxmoxer_types.v9.ProxmoxAPI.Pools": + return self.proxmox_api.pools def validate_config(self, proxlb_config: Config) -> None: """ @@ -320,7 +328,7 @@ def test_api_proxmox_host_ipv6(self, host: str, port: int = 8006, timeout: int = logger.debug("Finished: test_api_proxmox_host_ipv6.") return False - def test_api_user_permissions(self, proxmox_api: proxmoxer.ProxmoxAPI) -> None: + def test_api_user_permissions(self, proxmox_api: proxmoxer_types.v9.ProxmoxAPI) -> None: """ Test the permissions of the current user/token used for the Proxmox API. @@ -359,7 +367,7 @@ def test_api_user_permissions(self, proxmox_api: proxmoxer.ProxmoxAPI) -> None: logger.debug("Finished: test_api_user_permissions.") - def api_connect(self, proxlb_config: Config) -> proxmoxer.ProxmoxAPI: + def api_connect(self, proxlb_config: Config) -> proxmoxer_types.v9.ProxmoxAPI: """ Establishes a connection to the Proxmox API using the provided configuration. @@ -397,7 +405,7 @@ def api_connect(self, proxlb_config: Config) -> proxmoxer.ProxmoxAPI: try: if proxlb_config.proxmox_api.token_secret: - proxmox_api = proxmoxer.ProxmoxAPI( + proxmox_api = proxmoxer_types.v9.ProxmoxAPI( proxmox_api_endpoint.host, port=proxmox_api_endpoint.port, user=proxlb_config.proxmox_api.username, @@ -407,7 +415,7 @@ def api_connect(self, proxlb_config: Config) -> proxmoxer.ProxmoxAPI: timeout=proxlb_config.proxmox_api.timeout) logger.debug("Using API token authentication.") else: - proxmox_api = proxmoxer.ProxmoxAPI( + proxmox_api = proxmoxer_types.v9.ProxmoxAPI( proxmox_api_endpoint.host, port=proxmox_api_endpoint.port, user=proxlb_config.proxmox_api.username, diff --git a/setup.py b/setup.py index e8bcfbc..7682853 100644 --- a/setup.py +++ b/setup.py @@ -15,6 +15,7 @@ "requests", "urllib3", "proxmoxer", + "proxmoxer-stubs", "pydantic", "pyyaml", ],