diff --git a/src/pvecontrol/__init__.py b/src/pvecontrol/__init__.py
index 23127bb..c759ccf 100644
--- a/src/pvecontrol/__init__.py
+++ b/src/pvecontrol/__init__.py
@@ -27,9 +27,7 @@ def _regexp_type(value):
     def _column_type(value):
         if not value in columns:
             choices = ", ".join([f"'{c}'" for c in columns])
-            raise argparse.ArgumentTypeError(
-                f"invalid choice: '{value}' (choose from {choices})"
-            )
+            raise argparse.ArgumentTypeError(f"invalid choice: '{value}' (choose from {choices})")
         return value

     while True:
@@ -71,70 +69,43 @@ def _parser():
     ## Add version in help output
     # Parser configuration
-    parser = argparse.ArgumentParser(
-        description="Proxmox VE control cli.", epilog="Made with love by Enix.io"
-    )
+    parser = argparse.ArgumentParser(description="Proxmox VE control cli.", epilog="Made with love by Enix.io")
     parser.add_argument("-v", "--verbose", action="store_true")
     parser.add_argument("--debug", action="store_true")
     parser.add_argument(
-        "-c",
-        "--cluster",
-        action="store",
-        required=True,
-        help="Proxmox cluster name as defined in configuration",
+        "-c", "--cluster", action="store", required=True, help="Proxmox cluster name as defined in configuration"
     )
     subparsers = parser.add_subparsers(required=True)

     # clusterstatus parser
-    parser_clusterstatus = subparsers.add_parser(
-        "clusterstatus", help="Show cluster status"
-    )
+    parser_clusterstatus = subparsers.add_parser("clusterstatus", help="Show cluster status")
    parser_clusterstatus.set_defaults(func=actions.cluster.action_clusterstatus)

     # storagelist parser
-    parser_storagelist = subparsers.add_parser(
-        "storagelist", help="Show cluster status"
-    )
+    parser_storagelist = subparsers.add_parser("storagelist", help="List storages in the cluster")
     add_table_related_arguments(parser_storagelist, storage.COLUMNS, "storage")
     parser_storagelist.set_defaults(func=actions.storage.action_storagelist)

     # nodelist parser
-    parser_nodelist = subparsers.add_parser(
-        "nodelist", help="List nodes in the cluster"
-    )
+    parser_nodelist = subparsers.add_parser("nodelist", help="List nodes in the cluster")
     add_table_related_arguments(parser_nodelist, node.COLUMNS, "node")
     parser_nodelist.set_defaults(func=actions.node.action_nodelist)

     # nodeevacuate parser
-    parser_nodeevacuate = subparsers.add_parser(
-        "nodeevacuate", help="Evacuate an host by migrating all VMs"
-    )
-    parser_nodeevacuate.add_argument(
-        "--node", action="store", required=True, help="Node to evacuate"
-    )
+    parser_nodeevacuate = subparsers.add_parser("nodeevacuate", help="Evacuate a host by migrating all VMs")
+    parser_nodeevacuate.add_argument("--node", action="store", required=True, help="Node to evacuate")
     parser_nodeevacuate.add_argument(
         "--target",
         action="append",
         required=False,
         help="Destination Proxmox VE node, you can specify multiple target options",
     )
+    parser_nodeevacuate.add_argument("-f", "--follow", action="store_true", help="Follow task log output")
+    parser_nodeevacuate.add_argument("-w", "--wait", action="store_true", help="Wait for task to end")
     parser_nodeevacuate.add_argument(
-        "-f", "--follow", action="store_true", help="Follow task log output"
-    )
-    parser_nodeevacuate.add_argument(
-        "-w", "--wait", action="store_true", help="Wait task end"
-    )
-    parser_nodeevacuate.add_argument(
-        "--online",
-        action="store_true",
-        help="Online migrate the VM, default True",
-        default=True,
-    )
-    parser_nodeevacuate.add_argument(
-        "--no-skip-stopped", action="store_true", help="Don't skip VMs that are stopped"
-    )
-    parser_nodeevacuate.add_argument(
-        "--dry-run", action="store_true", help="Dry run, do not execute migration"
action="store_true", help="Dry run, do not execute migration" + "--online", action="store_true", help="Online migrate the VM, default True", default=True ) + parser_nodeevacuate.add_argument("--no-skip-stopped", action="store_true", help="Don't skip VMs that are stopped") + parser_nodeevacuate.add_argument("--dry-run", action="store_true", help="Dry run, do not execute migration") parser_nodeevacuate.set_defaults(func=actions.node.action_nodeevacuate) # vmlist parser @@ -142,30 +113,15 @@ def _parser(): add_table_related_arguments(parser_vmlist, vm.COLUMNS, "vmid") parser_vmlist.set_defaults(func=actions.vm.action_vmlist) # vmmigrate parser - parser_vmmigrate = subparsers.add_parser( - "vmmigrate", help="Migrate VMs in the cluster" - ) - parser_vmmigrate.add_argument( - "--vmid", action="store", required=True, type=int, help="VM to migrate" - ) - parser_vmmigrate.add_argument( - "--target", action="store", required=True, help="Destination Proxmox VE node" - ) - parser_vmmigrate.add_argument( - "--online", - action="store_true", - help="Online migrate the VM, default True", - default=True, - ) - parser_vmmigrate.add_argument( - "-f", "--follow", action="store_true", help="Follow task log output" - ) + parser_vmmigrate = subparsers.add_parser("vmmigrate", help="Migrate VMs in the cluster") + parser_vmmigrate.add_argument("--vmid", action="store", required=True, type=int, help="VM to migrate") + parser_vmmigrate.add_argument("--target", action="store", required=True, help="Destination Proxmox VE node") parser_vmmigrate.add_argument( - "-w", "--wait", action="store_true", help="Wait task end" - ) - parser_vmmigrate.add_argument( - "--dry-run", action="store_true", help="Dry run, do not execute migration" + "--online", action="store_true", help="Online migrate the VM, default True", default=True ) + parser_vmmigrate.add_argument("-f", "--follow", action="store_true", help="Follow task log output") + parser_vmmigrate.add_argument("-w", "--wait", action="store_true", help="Wait task end") + parser_vmmigrate.add_argument("--dry-run", action="store_true", help="Dry run, do not execute migration") parser_vmmigrate.set_defaults(func=actions.vm.action_vmmigrate) # tasklist parser @@ -174,24 +130,13 @@ def _parser(): parser_tasklist.set_defaults(func=actions.task.action_tasklist) # taskget parser parser_taskget = subparsers.add_parser("taskget", help="Get task detail") - parser_taskget.add_argument( - "--upid", - action="store", - required=True, - help="Proxmox tasks UPID to get informations", - ) - parser_taskget.add_argument( - "-f", "--follow", action="store_true", help="Follow task log output" - ) - parser_taskget.add_argument( - "-w", "--wait", action="store_true", help="Wait task end" - ) + parser_taskget.add_argument("--upid", action="store", required=True, help="Proxmox tasks UPID to get informations") + parser_taskget.add_argument("-f", "--follow", action="store_true", help="Follow task log output") + parser_taskget.add_argument("-w", "--wait", action="store_true", help="Wait task end") parser_taskget.set_defaults(func=actions.task.action_taskget) # sanitycheck parser - parser_sanitycheck = subparsers.add_parser( - "sanitycheck", help="Run Sanity checks on the cluster" - ) + parser_sanitycheck = subparsers.add_parser("sanitycheck", help="Run Sanity checks on the cluster") parser_sanitycheck.set_defaults(func=actions.cluster.action_sanitycheck) # _test parser, hidden from help @@ -222,9 +167,7 @@ def main(): sys.exit(1) # configure logging - logging.basicConfig( - encoding="utf-8", 
-    )
+    logging.basicConfig(encoding="utf-8", level=logging.DEBUG if args.debug else logging.INFO)
     logging.debug("Arguments: %s" % args)

     logging.info("Proxmox cluster: %s" % args.cluster)
diff --git a/src/pvecontrol/actions/cluster.py b/src/pvecontrol/actions/cluster.py
index 7972938..3758255 100644
--- a/src/pvecontrol/actions/cluster.py
+++ b/src/pvecontrol/actions/cluster.py
@@ -53,13 +53,13 @@ def action_sanitycheck(proxmox, args):
     for node in proxmox.nodes:
         if (node.maxcpu * proxmox.config["node"]["cpufactor"]) <= node.allocatedcpu:
             print(
-                "Node %s is in cpu overcommit status: %s allocated but %s available"
-                % (node.node, node.allocatedcpu, node.maxcpu)
+                f"Node {node.node} is in cpu overcommit status: "
+                f"{node.allocatedcpu} allocated but {node.maxcpu} available"
             )
         if (node.allocatedmem + proxmox.config["node"]["memoryminimum"]) >= node.maxmem:
             print(
-                "Node %s is in mem overcommit status: %s allocated but %s available"
-                % (node.node, node.allocatedmem, node.maxmem)
+                f"Node {node.node} is in mem overcommit status: "
+                f"{node.allocatedmem} allocated but {node.maxmem} available"
             )
     # More checks to implement
     # VM is started but 'startonboot' not set
diff --git a/src/pvecontrol/actions/node.py b/src/pvecontrol/actions/node.py
index fa7fdba..4240562 100644
--- a/src/pvecontrol/actions/node.py
+++ b/src/pvecontrol/actions/node.py
@@ -13,9 +13,7 @@ def action_nodelist(proxmox, args):
     """List proxmox nodes in the cluster using proxmoxer api"""
-    print_tableoutput(
-        proxmox.nodes, columns=args.columns, sortby=args.sort_by, filters=args.filter
-    )
+    print_tableoutput(proxmox.nodes, columns=args.columns, sortby=args.sort_by, filters=args.filter)


 def action_nodeevacuate(proxmox, args):
@@ -47,11 +45,7 @@ def action_nodeevacuate(proxmox, args):
                 continue
             targets.append(tg)
     else:
-        targets = [
-            n
-            for n in proxmox.nodes
-            if n.status == NodeStatus.online and n.node != srcnode.node
-        ]
+        targets = [n for n in proxmox.nodes if n.status == NodeStatus.online and n.node != srcnode.node]
     if len(targets) == 0:
         print("No target node available")
         return
@@ -59,10 +53,7 @@ def action_nodeevacuate(proxmox, args):

     plan = []
     for vm in srcnode.vms:
-        logging.debug(
-            "Selecting node for VM: %i, maxmem: %i, cpus: %i"
-            % (vm.vmid, vm.maxmem, vm.cpus)
-        )
+        logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i" % (vm.vmid, vm.maxmem, vm.cpus))
         if vm.status != VmStatus.running and not args.no_skip_stopped:
             logging.debug("VM %i is not running, skipping" % (vm.vmid))
             continue
@@ -72,17 +63,18 @@ def action_nodeevacuate(proxmox, args):
                 "Test target: %s, allocatedmem: %i, allocatedcpu: %i"
                 % (target.node, target.allocatedmem, target.allocatedcpu)
             )
-            if (vm.maxmem + target.allocatedmem) > (
-                target.maxmem - proxmox.config["node"]["memoryminimum"]
-            ):
+            if (vm.maxmem + target.allocatedmem) > (target.maxmem - proxmox.config["node"]["memoryminimum"]):
                 logging.debug("Discard target: %s, will overcommit ram" % (target.node))
-            elif (vm.cpus + target.allocatedcpu) > (
-                target.maxcpu * proxmox.config["node"]["cpufactor"]
-            ):
+            elif (vm.cpus + target.allocatedcpu) > (target.maxcpu * proxmox.config["node"]["cpufactor"]):
                 logging.debug("Discard target: %s, will overcommit cpu" % (target.node))
             else:
                 plan.append(
-                    {"vmid": vm.vmid, "vm": vm, "node": args.node, "target": target}
+                    {
+                        "vmid": vm.vmid,
+                        "vm": vm,
+                        "node": args.node,
+                        "target": target,
+                    }
                 )
                 target.allocatedmem += vm.maxmem
                 target.allocatedcpu += vm.cpus
@@ -100,10 +92,7 @@ def action_nodeevacuate(proxmox, args):
         print("No VM to migrate")
         return
     for p in plan:
-        print(
-            "Migrating VM %s (%s) from %s to %s"
-            % (p["vmid"], p["vm"].name, p["node"], p["target"].node)
-        )
+        print("Migrating VM %s (%s) from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
     confirmation = input("Confirm (yes):")
     logging.debug("Confirmation input: %s" % confirmation)
     if confirmation.lower() != "yes":
@@ -112,13 +101,8 @@ def action_nodeevacuate(proxmox, args):

     # run migrations
     for p in plan:
-        logging.debug(
-            "Migrating VM %s from %s to %s" % (p["vmid"], p["node"], p["target"].node)
-        )
-        print(
-            "Migrate VM: %i / %s from %s to %s"
-            % (p["vmid"], p["vm"].name, p["node"], p["target"].node)
-        )
+        logging.debug("Migrating VM %s from %s to %s" % (p["vmid"], p["node"], p["target"].node))
+        print("Migrate VM: %i / %s from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
         if not args.dry_run:
             upid = p["vm"].migrate(p["target"].node, args.online)
             logging.debug("Migration UPID: %s" % upid)
diff --git a/src/pvecontrol/actions/storage.py b/src/pvecontrol/actions/storage.py
index 32558d3..0616883 100644
--- a/src/pvecontrol/actions/storage.py
+++ b/src/pvecontrol/actions/storage.py
@@ -19,6 +19,4 @@ def action_storagelist(proxmox, args):
     for id, storage in storages.items():
         storages[id]["nodes"] = ", ".join(storages[id]["nodes"])

-    print_tableoutput(
-        storages.values(), COLUMNS, sortby=args.sort_by, filters=args.filter
-    )
+    print_tableoutput(storages.values(), COLUMNS, sortby=args.sort_by, filters=args.filter)
diff --git a/src/pvecontrol/actions/task.py b/src/pvecontrol/actions/task.py
index 90c4cf4..36e09ee 100644
--- a/src/pvecontrol/actions/task.py
+++ b/src/pvecontrol/actions/task.py
@@ -3,7 +3,10 @@

 def action_tasklist(proxmox, args):
     print_tableoutput(
-        proxmox.tasks, columns=args.columns, sortby=args.sort_by, filters=args.filter
+        proxmox.tasks,
+        columns=args.columns,
+        sortby=args.sort_by,
+        filters=args.filter,
     )

diff --git a/src/pvecontrol/actions/vm.py b/src/pvecontrol/actions/vm.py
index 6f29dfa..59f871e 100644
--- a/src/pvecontrol/actions/vm.py
+++ b/src/pvecontrol/actions/vm.py
@@ -46,11 +46,7 @@ def action_vmmigrate(proxmox, args):

     # FIXME
     # Check that the migration is possible
-    check = (
-        proxmox._api.nodes(node.node)
-        .qemu(vmid)
-        .migrate.get(node=node.node, target=target.node)
-    )
+    check = proxmox._api.nodes(node.node).qemu(vmid).migrate.get(node=node.node, target=target.node)
     logging.debug("Migration check: %s" % check)
     options = {}
     options["node"] = node.node
@@ -73,6 +69,4 @@ def action_vmmigrate(proxmox, args):
 def action_vmlist(proxmox, args):
     """List VMs in the Proxmox Cluster"""
     vms = proxmox.vms()
-    print_tableoutput(
-        vms, columns=args.columns, sortby=args.sort_by, filters=args.filter
-    )
+    print_tableoutput(vms, columns=args.columns, sortby=args.sort_by, filters=args.filter)
diff --git a/src/pvecontrol/cluster.py b/src/pvecontrol/cluster.py
index b961870..c159d6e 100644
--- a/src/pvecontrol/cluster.py
+++ b/src/pvecontrol/cluster.py
@@ -11,9 +11,7 @@ class PVECluster:
     """Proxmox VE Cluster"""

     def __init__(self, name, host, user, password, config, verify_ssl=False):
-        self._api = ProxmoxAPI(
-            host, user=user, password=password, verify_ssl=verify_ssl
-        )
+        self._api = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)
         self.name = name
         self.config = config
         self._initstatus()
@@ -28,14 +26,7 @@ def _initstatus(self):

         self.storages = []
         for storage in self.get_resources_storages():
-            self.storages.append(
-                PVEStorage(
-                    storage.pop("node"),
-                    storage.pop("id"),
-                    storage.pop("shared"),
-                    **storage
-                )
-            )
+            self.storages.append(PVEStorage(storage.pop("node"), storage.pop("id"), storage.pop("shared"), **storage))

         self.tasks = []
         for task in self._api.cluster.tasks.get():
@@ -77,19 +68,13 @@ def find_task(self, upid):
         return False

     def is_healthy(self):
-        return bool(
-            [item for item in self.status if item.get("type") == "cluster"][0][
-                "quorate"
-            ]
-        )
+        return bool([item for item in self.status if item.get("type") == "cluster"][0]["quorate"])

     def get_resources_nodes(self):
         return [resource for resource in self.resources if resource["type"] == "node"]

     def get_resources_storages(self):
-        return [
-            resource for resource in self.resources if resource["type"] == "storage"
-        ]
+        return [resource for resource in self.resources if resource["type"] == "storage"]

     def cpu_metrics(self):
         nodes = self.get_resources_nodes()
diff --git a/src/pvecontrol/config.py b/src/pvecontrol/config.py
index ab0b393..e259448 100644
--- a/src/pvecontrol/config.py
+++ b/src/pvecontrol/config.py
@@ -45,8 +45,6 @@ def set_config(cluster_name):
     logging.debug("clusterconfig is %s" % clusterconfig)

     for k, v in validconfig.node.items():
-        clusterconfig.node[k] = (
-            clusterconfig.node[k] if clusterconfig.node.get(k) else v
-        )
+        clusterconfig.node[k] = clusterconfig.node[k] if clusterconfig.node.get(k) else v

     return clusterconfig
diff --git a/src/pvecontrol/node.py b/src/pvecontrol/node.py
index 112152d..620402c 100644
--- a/src/pvecontrol/node.py
+++ b/src/pvecontrol/node.py
@@ -37,28 +37,12 @@ def __init__(self, api, node, status, input={}):
     def __str__(self):
         output = "Node: " + self.node + "\n"
         output += "Status: " + str(self.status) + "\n"
-        output += (
-            "CPU: "
-            + str(self.cpu)
-            + "/"
-            + str(self.allocatedcpu)
-            + "/"
-            + str(self.maxcpu)
-            + "\n"
-        )
-        output += (
-            "Mem: "
-            + str(self.mem)
-            + "/"
-            + str(self.allocatedmem)
-            + "/"
-            + str(self.maxmem)
-            + "\n"
-        )
-        output += "Disk: " + str(self.disk) + "/" + str(self.maxdisk) + "\n"
+        output += f"CPU: {self.cpu}/{self.allocatedcpu}/{self.maxcpu}\n"
+        output += f"Mem: {self.mem}/{self.allocatedmem}/{self.maxmem}\n"
+        output += f"Disk: {self.disk}/{self.maxdisk}\n"
         output += "VMs: \n"
         for vm in self.vms:
-            output += " - " + str(vm) + "\n"
+            output += f" - {vm}\n"
         return output

     def _init_vms(self):
diff --git a/src/pvecontrol/task.py b/src/pvecontrol/task.py
index 5d74c35..4041aba 100644
--- a/src/pvecontrol/task.py
+++ b/src/pvecontrol/task.py
@@ -43,11 +43,7 @@ def __init__(self, api, upid):
         self.refresh()

     def log(self, limit=0, start=0):
-        return (
-            self._api.nodes(self.node)
-            .tasks(self.upid)
-            .log.get(limit=limit, start=start)
-        )
+        return self._api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start)

     def running(self):
         return self.runningstatus == TaskRunningStatus.running
diff --git a/src/pvecontrol/utils.py b/src/pvecontrol/utils.py
index 566a8e3..2a71d6d 100644
--- a/src/pvecontrol/utils.py
+++ b/src/pvecontrol/utils.py
@@ -9,16 +9,23 @@
 from enum import Enum

+NATURALSIZE_KEYS = [
+    "mem",
+    "allocatedmem",
+    "maxmem",
+    "disk",
+    "allocateddisk",
+    "maxdisk",
+]
+
+
 # Pretty output a table from a table of dicts
 # We assume all dicts have the same keys and are sorted by key
 def print_tableoutput(table, columns=[], sortby=None, filters=[]):
     if len(columns) == 0:
         columns = table[0].keys()
     else:
-        table = [
-            filter_keys(n.__dict__ if hasattr(n, "__dict__") else n, columns)
-            for n in table
-        ]
+        table = [filter_keys(n.__dict__ if hasattr(n, "__dict__") else n, columns) for n in table]
hasattr(n, "__dict__") else n, columns) for n in table] do_sort = not sortby is None @@ -31,14 +38,7 @@ def print_tableoutput(table, columns=[], sortby=None, filters=[]): line["sortby"] = line[sortby] if isinstance(line[sortby], Enum): line["sortby"] = str(line[sortby]) - for key in [ - "mem", - "allocatedmem", - "maxmem", - "disk", - "allocateddisk", - "maxdisk", - ]: + for key in NATURALSIZE_KEYS: if key in line: line[key] = naturalsize(line[key], binary=True) diff --git a/src/pvecontrol/vm.py b/src/pvecontrol/vm.py index 1e1298f..17faba5 100644 --- a/src/pvecontrol/vm.py +++ b/src/pvecontrol/vm.py @@ -32,18 +32,22 @@ def __init__(self, api, node, vmid, status, input={}): self.config = self._api.nodes(self.node).qemu(vmid).config.get() def __str__(self): - return "vmid: {}, status: {}, name: {}, lock: {}, cpus: {}, maxdisk: {}, maxmem: {}, uptime: {}, tags: {}, template: {}".format( - self.vmid, - self.status, - self.name, - self.lock, - self.cpus, - self.maxdisk, - self.maxmem, - self.uptime, - self.tags, - self.template, - ) + str_keys = [ + "vmid", + "status", + "name", + "lock", + "cpus", + "maxdisk", + "maxmem", + "uptime", + "tags", + "template", + ] + output = [] + for k in str_keys: + output.append(f"{k}: {getattr(self, k)}") + return ", ".join(output) def migrate(self, target, online=False): options = {}