From aea24ecd094635593c3d98d452ba8147f1b3533c Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Thu, 6 Jan 2022 14:07:36 +0100 Subject: [PATCH 01/12] Initial openstack support --- module/sources/__init__.py | 3 +- module/sources/openstack/connection.py | 1205 ++++++++++++++++++++++++ requirements.txt | 1 + 3 files changed, 1208 insertions(+), 1 deletion(-) create mode 100644 module/sources/openstack/connection.py diff --git a/module/sources/__init__.py b/module/sources/__init__.py index dc2b7dc..4fc9842 100644 --- a/module/sources/__init__.py +++ b/module/sources/__init__.py @@ -9,6 +9,7 @@ # define all available sources here from module.sources.vmware.connection import VMWareHandler +from module.sources.openstack.connection import OpenStackHandler from module.sources.check_redfish.import_inventory import CheckRedfish from module.common.logging import get_logger @@ -18,7 +19,7 @@ from module.config import source_config_section_name # list of valid sources -valid_sources = [VMWareHandler, CheckRedfish] +valid_sources = [VMWareHandler, OpenStackHandler, CheckRedfish] def validate_source(source_class_object=None, state="pre"): diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py new file mode 100644 index 0000000..36e13e9 --- /dev/null +++ b/module/sources/openstack/connection.py @@ -0,0 +1,1205 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 - 2021 Ricardo Bartels. All rights reserved. +# +# netbox-sync.py +# +# This work is licensed under the terms of the MIT license. +# For a copy, see file LICENSE.txt included in this +# repository or visit: . 
+ +import pprint +import re +import ssl +from ipaddress import ip_address, ip_network, ip_interface +from socket import gaierror +from urllib.parse import unquote + +import openstack + +from module.sources.common.source_base import SourceBase +from module.common.logging import get_logger, DEBUG3 +from module.common.misc import grab, dump, get_string_or_none +from module.common.support import normalize_mac_address, ip_valid_to_add_to_netbox +from module.netbox.object_classes import ( + NetBoxObject, + NetBoxInterfaceType, + NBTag, + NBManufacturer, + NBDeviceType, + NBPlatform, + NBClusterType, + NBClusterGroup, + NBDeviceRole, + NBSite, + NBCluster, + NBDevice, + NBVM, + NBVMInterface, + NBInterface, + NBIPAddress, + NBPrefix, + NBTenant, + NBVRF, + NBVLAN, + NBCustomField +) + +log = get_logger() + + +# noinspection PyTypeChecker +class OpenStackHandler(SourceBase): + """ + Source class to import data from a Openstack instance and add/update NetBox objects based on gathered information + """ + + dependent_netbox_objects = [ + NBTag, + NBManufacturer, + NBDeviceType, + NBPlatform, + NBClusterType, + NBClusterGroup, + NBDeviceRole, + NBSite, + NBCluster, + NBDevice, + NBVM, + NBVMInterface, + NBInterface, + NBIPAddress, + NBPrefix, + NBTenant, + NBVRF, + NBVLAN, + NBCustomField + ] + + settings = { + "enabled": True, + "auth_url": None, + "project": None, + "username": None, + "password": None, + "region": None, + "user_domain": None, + "project_domain": None, + "group_name": "Openstack" + "validate_tls_certs": False, + "cluster_exclude_filter": None, + "cluster_include_filter": None, + "host_exclude_filter": None, + "host_include_filter": None, + "vm_exclude_filter": None, + "vm_include_filter": None, + "permitted_subnets": None, + "collect_hardware_asset_tag": True, + "match_host_by_serial": True, + "cluster_site_relation": None, + "cluster_tag_relation": None, + "cluster_tenant_relation": None, + "host_role_relation": None, + "host_site_relation": None, + 
"host_tag_relation": None, + "host_tenant_relation": None, + "vm_platform_relation": None, + "vm_role_relation": None, + "vm_tag_relation": None, + "vm_tenant_relation": None, + "dns_name_lookup": False, + "custom_dns_servers": None, + "set_primary_ip": "when-undefined", + "skip_vm_comments": False, + "skip_vm_templates": True, + "strip_host_domain_name": False, + "strip_vm_domain_name": False, + "sync_tags": False, + "sync_parent_tags": False, + "sync_custom_attributes": False + } + + deprecated_settings = {} + + removed_settings = { + "netbox_host_device_role": "host_role_relation", + "netbox_vm_device_role": "vm_role_relation" + } + + init_successful = False + inventory = None + name = None + source_tag = None + source_type = "openstack" + + # internal vars + session = None + tag_session = None + + site_name = None + + def __init__(self, name=None, settings=None, inventory=None): + + if name is None: + raise ValueError(f"Invalid value for attribute 'name': '{name}'.") + + self.inventory = inventory + self.name = name + + self.parse_config_settings(settings) + + self.source_tag = f"Source: {name}" + self.site_name = f"OpenStack: {name}" + + if self.enabled is False: + log.info(f"Source '{name}' is currently disabled. Skipping") + return + + self.create_openstack_session() + + if self.session is None: + log.info(f"Source '{name}' is currently unavailable. 
Skipping") + return + + self.init_successful = True + self.permitted_clusters = dict() + self.cluster_host_map = dict() + self.processed_host_names = dict() + self.processed_vm_names = dict() + self.processed_vm_uuid = list() + self.parsing_vms_the_first_time = True + + def parse_config_settings(self, config_settings): + """ + Validate parsed settings from config file + + Parameters + ---------- + config_settings: dict + dict of config settings + + """ + + validation_failed = False + + for setting in ["auth_url", "project", "username", "password", "region", "user_domain", "project_domain"]: + if config_settings.get(setting) is None: + log.error(f"Config option '{setting}' in 'source/{self.name}' can't be empty/undefined") + validation_failed = True + + # check permitted ip subnets + if config_settings.get("permitted_subnets") is None: + log.info(f"Config option 'permitted_subnets' in 'source/{self.name}' is undefined. " + f"No IP addresses will be populated to NetBox!") + else: + config_settings["permitted_subnets"] = \ + [x.strip() for x in config_settings.get("permitted_subnets").split(",") if x.strip() != ""] + + permitted_subnets = list() + for permitted_subnet in config_settings["permitted_subnets"]: + try: + permitted_subnets.append(ip_network(permitted_subnet)) + except Exception as e: + log.error(f"Problem parsing permitted subnet: {e}") + validation_failed = True + + config_settings["permitted_subnets"] = permitted_subnets + + # check include and exclude filter expressions + for setting in [x for x in config_settings.keys() if "filter" in x]: + if config_settings.get(setting) is None or config_settings.get(setting).strip() == "": + continue + + re_compiled = None + try: + re_compiled = re.compile(config_settings.get(setting)) + except Exception as e: + log.error(f"Problem parsing regular expression for '{setting}': {e}") + validation_failed = True + + config_settings[setting] = re_compiled + + for relation_option in [x for x in self.settings.keys() if 
"relation" in x]: + + if config_settings.get(relation_option) is None: + continue + + relation_data = list() + + relation_type = relation_option.split("_")[1] + + # obey quotations to be able to add names including a comma + # thanks to: https://stackoverflow.com/a/64333329 + for relation in re.split(r",(?=(?:[^\"']*[\"'][^\"']*[\"'])*[^\"']*$)", + config_settings.get(relation_option)): + + object_name = relation.split("=")[0].strip(' "') + relation_name = relation.split("=")[1].strip(' "') + + if len(object_name) == 0 or len(relation_name) == 0: + log.error(f"Config option '{relation}' malformed got '{object_name}' for " + f"object name and '{relation_name}' for {relation_type} name.") + validation_failed = True + + try: + re_compiled = re.compile(object_name) + except Exception as e: + log.error(f"Problem parsing regular expression '{object_name}' for '{relation}': {e}") + validation_failed = True + continue + + relation_data.append({ + "object_regex": re_compiled, + "assigned_name": relation_name + }) + + config_settings[relation_option] = relation_data + + if config_settings.get("dns_name_lookup") is True and config_settings.get("custom_dns_servers") is not None: + + custom_dns_servers = \ + [x.strip() for x in config_settings.get("custom_dns_servers").split(",") if x.strip() != ""] + + tested_custom_dns_servers = list() + for custom_dns_server in custom_dns_servers: + try: + tested_custom_dns_servers.append(str(ip_address(custom_dns_server))) + except ValueError: + log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' " + f"does not appear to be an IP address.") + validation_failed = True + + config_settings["custom_dns_servers"] = tested_custom_dns_servers + + if validation_failed is True: + log.error("Config validation failed. 
Exit!") + exit(1) + + for setting in self.settings.keys(): + setattr(self, setting, config_settings.get(setting)) + + def create_openstack_session(self): + """ + Initialize session with OpenStack + + Returns + ------- + bool: if initialization was successful or not + """ + + if self.session is not None: + return True + + log.debug(f"Starting OpenStack connection to '{self.auth_url}'") + + ssl_context = ssl.create_default_context() + if bool(self.validate_tls_certs) is False: + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + + try: + self.session = openstack.connect( + auth_url=self.auth_url, + project_name=self.project, + username=self.username, + password=self.password, + region_name=self.region, + user_domain_name=self.user_domain, + project_domain_name=self.project_domain, + app_name='netbox-sync', + app_version='0.1', + ) + + except (gaierror, OSError) as e: + log.error( + f"Unable to connect to OpenStack instance '{self.auth_url}' on port {self.port}. " + f"Reason: {e}" + ) + return False + except Exception as e: + log.error(f"Unable to connect to OpenStack instance '{self.auth_url}' on port {self.port}. {e.msg}") + return False + + log.info(f"Successfully connected to OpenStack '{self.auth_url}'") + + return True + + def apply(self): + """ + Main source handler method. This method is called for each source from "main" program + to retrieve data from it source and apply it to the NetBox inventory. + + Every update of new/existing objects fot this source has to happen here. + """ + + log.info(f"Query data from OpenStack: '{self.auth_url}'") + + """ + Mapping of object type keywords to view types and handlers + + iterate over all VMs twice. + + To handle VMs with the same name in a cluster we first + iterate over all VMs and look only at the active ones + and sync these first. + Then we iterate a second time to catch the rest. 
+ + This has been implemented to support migration scenarios + where you create the same machines with a different setup + like a new version or something. This way NetBox will be + updated primarily with the actual active VM data. + + """ + + availability_zones = self.session.compute.availability_zones(details=True) + for availability_zone in availability_zones: + self.add_cluster(availability_zone) + + servers = self.session.compute.servers(details=True, all_projects=True) + for server in servers: + self.add_virtual_machine(server) + + hypervisors = self.session.compute.hypervisors(details=True) + for hypervisor in hypervisors: + self.add_host(hypervisor) + + self.update_basic_data() + + @staticmethod + def passes_filter(name, include_filter, exclude_filter): + """ + checks if object name passes a defined object filter. + + Parameters + ---------- + name: str + name of the object to check + include_filter: regex object + regex object of include filter + exclude_filter: regex object + regex object of exclude filter + + Returns + ------- + bool: True if all filter passed, otherwise False + """ + + # first includes + if include_filter is not None and not include_filter.match(name): + log.debug(f"Object '{name}' did not match include filter '{include_filter.pattern}'. Skipping") + return False + + # second excludes + if exclude_filter is not None and exclude_filter.match(name): + log.debug(f"Object '{name}' matched exclude filter '{exclude_filter.pattern}'. 
Skipping") + return False + + return True + + def get_site_name(self, object_type, object_name, cluster_name=""): + """ + Return a site name for a NBCluster or NBDevice depending on config options + host_site_relation and cluster_site_relation + + Parameters + ---------- + object_type: (NBCluster, NBDevice) + object type to check site relation for + object_name: str + object name to check site relation for + cluster_name: str + cluster name of NBDevice to check for site name + + Returns + ------- + str: site name if a relation was found + """ + + if object_type not in [NBCluster, NBDevice]: + raise ValueError(f"Object must be a '{NBCluster.name}' or '{NBDevice.name}'.") + + log.debug2(f"Trying to find site name for {object_type.name} '{object_name}'") + + # check if site was provided in config + relation_name = "host_site_relation" if object_type == NBDevice else "cluster_site_relation" + + site_name = self.get_object_relation(object_name, relation_name) + + if object_type == NBDevice and site_name is None: + site_name = self.permitted_clusters.get(cluster_name) or \ + self.get_site_name(NBCluster, object_name, cluster_name) + log.debug2(f"Found a matching cluster site for {object_name}, using site '{site_name}'") + + # set default site name + if site_name is None: + site_name = self.site_name + log.debug(f"No site relation for '{object_name}' found, using default site '{site_name}'") + + return site_name + + def get_object_based_on_macs(self, object_type, mac_list=None): + """ + Try to find a NetBox object based on list of MAC addresses. + + Iterate over all interfaces of this object type and compare MAC address with list of desired MAC + addresses. If match was found store related machine object and count every correct match. + + If exactly one machine with matching interfaces was found then this one will be returned. + + If two or more machines with matching MACs are found compare the two machines with + the highest amount of matching interfaces. 
If the ration of matching interfaces + exceeds 2.0 then the top matching machine is chosen as desired object. + + If the ration is below 2.0 then None will be returned. The probability is to low that + this one is the correct one. + + None will also be returned if no machine was found at all. + + Parameters + ---------- + object_type: (NBDevice, NBVM) + type of NetBox device to find in inventory + mac_list: list + list of MAC addresses to compare against NetBox interface objects + + Returns + ------- + (NBDevice, NBVM, None): object instance of found device, otherwise None + """ + + object_to_return = None + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if mac_list is None or not isinstance(mac_list, list) or len(mac_list) == 0: + return + + interface_typ = NBInterface if object_type == NBDevice else NBVMInterface + + objects_with_matching_macs = dict() + matching_object = None + + for interface in self.inventory.get_all_items(interface_typ): + + if grab(interface, "data.mac_address") in mac_list: + + matching_object = grab(interface, f"data.{interface.secondary_key}") + if not isinstance(matching_object, (NBDevice, NBVM)): + continue + + log.debug2("Found matching MAC '%s' on %s '%s'" % + (grab(interface, "data.mac_address"), object_type.name, + matching_object.get_display_name(including_second_key=True))) + + if objects_with_matching_macs.get(matching_object) is None: + objects_with_matching_macs[matching_object] = 1 + else: + objects_with_matching_macs[matching_object] += 1 + + # try to find object based on amount of matching MAC addresses + num_devices_witch_matching_macs = len(objects_with_matching_macs.keys()) + + if num_devices_witch_matching_macs == 1 and isinstance(matching_object, (NBDevice, NBVM)): + + log.debug2("Found one %s '%s' based on MAC addresses and using it" % + (object_type.name, matching_object.get_display_name(including_second_key=True))) + + object_to_return = 
list(objects_with_matching_macs.keys())[0] + + elif num_devices_witch_matching_macs > 1: + + log.debug2(f"Found {num_devices_witch_matching_macs} {object_type.name}s with matching MAC addresses") + + # now select the two top matches + first_choice, second_choice = \ + sorted(objects_with_matching_macs, key=objects_with_matching_macs.get, reverse=True)[0:2] + + first_choice_matches = objects_with_matching_macs.get(first_choice) + second_choice_matches = objects_with_matching_macs.get(second_choice) + + log.debug2(f"The top candidate {first_choice.get_display_name()} with {first_choice_matches} matches") + log.debug2(f"The second candidate {second_choice.get_display_name()} with {second_choice_matches} matches") + + # get ratio between + matching_ration = first_choice_matches / second_choice_matches + + # only pick the first one if the ration exceeds 2 + if matching_ration >= 2.0: + log.debug2(f"The matching ratio of {matching_ration} is high enough " + f"to select {first_choice.get_display_name()} as desired {object_type.name}") + object_to_return = first_choice + else: + log.debug2("Both candidates have a similar amount of " + "matching interface MAC addresses. Using NONE of them!") + + return object_to_return + + def get_object_based_on_primary_ip(self, object_type, primary_ip4=None, primary_ip6=None): + """ + Try to find a NBDevice or NBVM based on the primary IP address. If an exact + match was found the device/vm object will be returned immediately without + checking of the other primary IP address (if defined). 
+ + Parameters + ---------- + object_type: (NBDevice, NBVM) + object type to look for + primary_ip4: str + primary IPv4 address of object to find + primary_ip6: str + primary IPv6 address of object to find + + Returns + ------- + + """ + + def _matches_device_primary_ip(device_primary_ip, ip_needle): + + ip = None + if device_primary_ip is not None and ip_needle is not None: + if isinstance(device_primary_ip, dict): + ip = grab(device_primary_ip, "address") + + elif isinstance(device_primary_ip, int): + ip = self.inventory.get_by_id(NBIPAddress, nb_id=device_primary_ip) + ip = grab(ip, "data.address") + + if ip is not None and ip.split("/")[0] == ip_needle: + return True + + return False + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if primary_ip4 is None and primary_ip6 is None: + return + + if primary_ip4 is not None: + primary_ip4 = str(primary_ip4).split("/")[0] + + if primary_ip6 is not None: + primary_ip6 = str(primary_ip6).split("/")[0] + + for device in self.inventory.get_all_items(object_type): + + if _matches_device_primary_ip(grab(device, "data.primary_ip4"), primary_ip4) is True: + log.debug2(f"Found existing host '{device.get_display_name()}' " + f"based on the primary IPv4 '{primary_ip4}'") + return device + + if _matches_device_primary_ip(grab(device, "data.primary_ip6"), primary_ip6) is True: + log.debug2(f"Found existing host '{device.get_display_name()}' " + f"based on the primary IPv6 '{primary_ip6}'") + return device + + def get_object_relation(self, name, relation, fallback=None): + """ + + Parameters + ---------- + name: str + name of the object to find a relation for + relation: str + name of the config variable relation (i.e: vm_tag_relation) + fallback: str + fallback string if no relation matched + + Returns + ------- + data: str, list, None + string of matching relation or list of matching tags + """ + + resolved_list = list() + for single_relation in grab(self, 
relation, fallback=list()): + object_regex = single_relation.get("object_regex") + if object_regex.match(name): + resolved_name = single_relation.get("assigned_name") + log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) for {name}.") + resolved_list.append(resolved_name) + + if grab(f"{relation}".split("_"), "1") == "tag": + return resolved_list + + else: + resolved_name = fallback + if len(resolved_list) >= 1: + resolved_name = resolved_list[0] + if len(resolved_list) > 1: + log.debug(f"Found {len(resolved_list)} matches for {name} in {relation}." + f" Using first on: {resolved_name}") + + return resolved_name + + def get_cluster_for_host(self, hostname): + for cluster, hosts in self.cluster_host_map.items(): + if hostname in hosts: + return cluster + return None + + def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, vnic_data=None, + nic_ips=None, p_ipv4=None, p_ipv6=None): + """ + Add/update device/VM object in inventory based on gathered data. + + Try to find object first based on the object data, interface MAC addresses and primary IPs. + 1. try to find by name and cluster/site + 2. try to find by mac addresses interfaces + 3. try to find by serial number (1st) or asset tag (2nd) (ESXi host) + 4. try to find by primary IP + + IP addresses for each interface are added here as well. First they will be checked and added + if all checks pass. For each IP address a matching IP prefix will be searched for. First we + look for longest matching IP Prefix in the same site. If this failed we try to find the longest + matching global IP Prefix. + + If a IP Prefix was found then we try to get the VRF and VLAN for this prefix. Now we compare + if interface VLAN and prefix VLAN match up and warn if they don't. Then we try to add data to + the IP address if not already set: + + add prefix VRF if VRF for this IP is undefined + add tenant if tenant for this IP is undefined + 1. try prefix tenant + 2. 
if prefix tenant is undefined try VLAN tenant + + And we also set primary IP4/6 for this object depending on the "set_primary_ip" setting. + + If a IP address is set as primary IP for another device then using this IP on another + device will be rejected by NetBox. + + Setting "always": + check all NBDevice and NBVM objects if this IP address is set as primary IP to any + other object then this one. If we found another object, then we unset the primary_ip* + for the found object and assign it to this object. + + This setting will also reset the primary IP if it has been changed in NetBox + + Setting "when-undefined": + Will set the primary IP for this object if primary_ip4/6 is undefined. Will cause a + NetBox error if IP has been assigned to a different object as well + + Setting "never": + Well, the attribute primary_ip4/6 will never be touched/changed. + + Parameters + ---------- + object_type: (NBDevice, NBVM) + NetBoxObject sub class of object to add + object_data: dict + data of object to add/update + pnic_data: dict + data of physical interfaces of this object, interface name as key + vnic_data: dict + data of virtual interfaces of this object, interface name as key + nic_ips: dict + dict of ips per interface of this object, interface name as key + p_ipv4: str + primary IPv4 as string including netmask/prefix + p_ipv6: str + primary IPv6 as string including netmask/prefix + + """ + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if log.level == DEBUG3: + + log.debug3("function: add_device_vm_to_inventory") + log.debug3(f"Object type {object_type}") + pprint.pprint(object_data) + pprint.pprint(pnic_data) + pprint.pprint(vnic_data) + pprint.pprint(nic_ips) + pprint.pprint(p_ipv4) + pprint.pprint(p_ipv6) + + # check existing Devices for matches + log.debug2(f"Trying to find a {object_type.name} based on the collected name, cluster, IP and MAC addresses") + + device_vm_object = 
self.inventory.get_by_data(object_type, data=object_data) + + if device_vm_object is not None: + log.debug2("Found a exact matching %s object: %s" % + (object_type.name, device_vm_object.get_display_name(including_second_key=True))) + + # keep searching if no exact match was found + else: + + log.debug2(f"No exact match found. Trying to find {object_type.name} based on MAC addresses") + + # on VMs vnic data is used, on physical devices pnic data is used + mac_source_data = vnic_data if object_type == NBVM else pnic_data + + nic_macs = [x.get("mac_address") for x in mac_source_data.values()] + + device_vm_object = self.get_object_based_on_macs(object_type, nic_macs) + + # look for devices with same serial or asset tag + if object_type == NBDevice: + + if device_vm_object is None and object_data.get("serial") is not None and \ + bool(self.match_host_by_serial) is True: + log.debug2(f"No match found. Trying to find {object_type.name} based on serial number") + + device_vm_object = self.inventory.get_by_data(object_type, data={"serial": object_data.get("serial")}) + + if device_vm_object is None and object_data.get("asset_tag") is not None: + log.debug2(f"No match found. Trying to find {object_type.name} based on asset tag") + + device_vm_object = self.inventory.get_by_data(object_type, + data={"asset_tag": object_data.get("asset_tag")}) + + if device_vm_object is not None: + log.debug2("Found a matching %s object: %s" % + (object_type.name, device_vm_object.get_display_name(including_second_key=True))) + + # keep looking for devices with the same primary IP + else: + + log.debug2(f"No match found. Trying to find {object_type.name} based on primary IP addresses") + + device_vm_object = self.get_object_based_on_primary_ip(object_type, p_ipv4, p_ipv6) + + if device_vm_object is None: + object_name = object_data.get(object_type.primary_key) + log.debug(f"No existing {object_type.name} object for {object_name}. 
Creating a new {object_type.name}.") + device_vm_object = self.inventory.add_object(object_type, data=object_data, source=self) + else: + device_vm_object.update(data=object_data, source=self) + + # update role according to config settings + object_name = object_data.get(object_type.primary_key) + role_name = self.get_object_relation(object_name, + "host_role_relation" if object_type == NBDevice else "vm_role_relation", + fallback="Server") + + if object_type == NBDevice: + device_vm_object.update(data={"device_role": {"name": role_name}}) + if object_type == NBVM: + device_vm_object.update(data={"role": {"name": role_name}}) + + # compile all nic data into one dictionary + if object_type == NBVM: + nic_data = vnic_data + else: + nic_data = {**pnic_data, **vnic_data} + + # map interfaces of existing object with discovered interfaces + nic_object_dict = self.map_object_interfaces_to_current_interfaces(device_vm_object, nic_data) + + if object_data.get("status", "") == "active" and (nic_ips is None or len(nic_ips.keys()) == 0): + log.debug(f"No IP addresses for '{object_name}' found!") + + primary_ipv4_object = None + primary_ipv6_object = None + + if p_ipv4 is not None: + try: + primary_ipv4_object = ip_interface(p_ipv4) + except ValueError: + log.error(f"Primary IPv4 ({p_ipv4}) does not appear to be a valid IP address (needs included suffix).") + + if p_ipv6 is not None: + try: + primary_ipv6_object = ip_interface(p_ipv6) + except ValueError: + log.error(f"Primary IPv6 ({p_ipv6}) does not appear to be a valid IP address (needs included suffix).") + + for int_name, int_data in nic_data.items(): + + # add/update interface with retrieved data + nic_object, ip_address_objects = self.add_update_interface(nic_object_dict.get(int_name), device_vm_object, + int_data, nic_ips.get(int_name, list())) + + # add all interface IPs + for ip_object in ip_address_objects: + + ip_interface_object = ip_interface(grab(ip_object, "data.address")) + + if ip_object is None: + continue + 
+ # continue if address is not a primary IP + if ip_interface_object not in [primary_ipv4_object, primary_ipv6_object]: + continue + + # set/update/remove primary IP addresses + set_this_primary_ip = False + ip_version = ip_interface_object.ip.version + if self.set_primary_ip == "always": + + for object_type in [NBDevice, NBVM]: + + # new IPs don't need to be removed from other devices/VMs + if ip_object.is_new is True: + break + + for devices_vms in self.inventory.get_all_items(object_type): + + # device has no primary IP of this version + this_primary_ip = grab(devices_vms, f"data.primary_ip{ip_version}") + + # we found this exact object + if devices_vms == device_vm_object: + continue + + # device has the same object assigned + if this_primary_ip == ip_object: + devices_vms.unset_attribute(f"primary_ip{ip_version}") + + set_this_primary_ip = True + + elif self.set_primary_ip != "never" and grab(device_vm_object, f"data.primary_ip{ip_version}") is None: + set_this_primary_ip = True + + if set_this_primary_ip is True: + + log.debug(f"Setting IP '{grab(ip_object, 'data.address')}' as primary IPv{ip_version} for " + f"'{device_vm_object.get_display_name()}'") + device_vm_object.update(data={f"primary_ip{ip_version}": ip_object}) + + return + + def add_cluster(self, obj): + """ + Add a OpenStack Availability Zone as a NBCluster to NetBox. Cluster name is checked against + cluster_include_filter and cluster_exclude_filter config setting. Also adds + cluster and site_name to "self.permitted_clusters" so hosts and VMs can be + checked if they are part of a permitted cluster. 
+ + Parameters + ---------- + obj: openstack.compute.v2.availability_zone.AvailabilityZone + cluster to add + """ + + name = get_string_or_none(obj.name) + group = self.group_name + + if name is None or group is None: + return + + log.debug(f"Parsing OpenStack AZ: {name}") + + if self.passes_filter(name, self.cluster_include_filter, self.cluster_exclude_filter) is False: + return + + site_name = self.get_site_name(NBCluster, name) + + data = { + "name": name, + "type": {"name": "Openstack"}, + "group": {"name": group}, + "site": {"name": site_name} + } + + self.inventory.add_update_object(NBCluster, data=data, source=self) + + self.cluster_host_map[name] = list() + for host in obj.hosts: + self.cluster_host_map[name].append(host) + + self.permitted_clusters[name] = site_name + + def add_host(self, obj): + """ + Parse a Openstack host to NetBox once all data is gathered. + + First host is filtered: + host has a cluster and is it permitted + was host with same name and site already parsed + does the host pass the host_include_filter and host_exclude_filter + + Then all necessary host data will be collected. + host model, manufacturer, serial, physical interfaces, virtual interfaces, + virtual switches, proxy switches, host port groups, interface VLANs, IP addresses + + Primary IPv4/6 will be determined by + 1. if the interface port group name contains + "management" or "mngt" + 2. interface is the default route of this host + + Parameters + ---------- + obj: Hypervisor + host object to parse + """ + + name = get_string_or_none(obj.name) + + if name is not None and self.strip_host_domain_name is True: + name = name.split(".")[0] + + # parse data + log.debug(f"Parsing Openstack host: {name}") + + # + # Filtering + # + + # manage site and cluster + short_name = get_string_or_none(obj.service_details["host"]) + cluster_name = self.get_cluster_for_host(short_name) + + if cluster_name is None: + log.error(f"Requesting cluster for host '{name}' failed. 
Skipping.") + return + + if log.level == DEBUG3: + try: + log.info("Cluster data") + dump(obj.service_details) + except Exception as e: + log.error(e) + + if self.permitted_clusters.get(cluster_name) is None: + log.debug(f"Host '{name}' is not part of a permitted cluster. Skipping") + return + + # get a site for this host + site_name = self.get_site_name(NBDevice, name, cluster_name) + + if name in self.processed_host_names.get(site_name, list()): + log.warning(f"Host '{name}' for site '{site_name}' already parsed. " + "Make sure to use unique host names. Skipping") + return + + # add host to processed list + if self.processed_host_names.get(site_name) is None: + self.processed_host_names[site_name] = list() + + self.processed_host_names[site_name].append(name) + + # filter hosts by name + if self.passes_filter(name, self.host_include_filter, self.host_exclude_filter) is False: + return + + # + # Collecting data + # + + # collect all necessary data + manufacturer = None + model = None + product_name = get_string_or_none(obj.hypervisor_type) + product_version = get_string_or_none(obj.hypervisor_version) + platform = f"{product_name} {product_version}" + + # if the device vendor/model cannot be retrieved (due to problem on the host), + # set a dummy value so the host still gets synced + if manufacturer is None: + manufacturer = "Generic Vendor" + if model is None: + model = "Generic Model" + + # get status + status = "offline" + if get_string_or_none(obj.status) == "enabled": + status = "active" + + # add asset tag if desired and present + asset_tag = None + + # get host_tenant_relation + tenant_name = self.get_object_relation(name, "host_tenant_relation") + + # get host_tag_relation + host_tags = self.get_object_relation(name, "host_tag_relation") + + # prepare host data model + host_data = { + "name": name, + "device_type": { + "model": model, + "manufacturer": { + "name": manufacturer + } + }, + "site": {"name": site_name}, + "cluster": {"name": cluster_name}, + 
"status": status + } + + # add data if present + if asset_tag is not None: + host_data["asset_tag"] = asset_tag + if platform is not None: + host_data["platform"] = {"name": platform} + if tenant_name is not None: + host_data["tenant"] = {"name": tenant_name} + if len(host_tags) > 0: + host_data["tags"] = host_tags + + host_primary_ip4 = obj.host_ip + host_primary_ip6 = None + + # add host to inventory + self.add_device_vm_to_inventory(NBDevice, object_data=host_data, pnic_data=dict(), + vnic_data=dict(), nic_ips=None, + p_ipv4=host_primary_ip4, p_ipv6=host_primary_ip6) + + return + + def add_virtual_machine(self, obj): + """ + Parse a OpenStack VM add to NetBox once all data is gathered. + + Parameters + ---------- + obj: openstack.compute.v2.server.Server + virtual machine object to parse + """ + + name = get_string_or_none(obj.name) + + if name is not None and self.strip_vm_domain_name is True: + name = name.split(".")[0] + + log.debug(f"Parsing OpenStack VM: {name}") + + # get VM power state + status = "active" if get_string_or_none(obj.status) == "ACTIVE" else "offline" + + # hypervisor_name = get_string_or_none(obj.hypervisor_hostname) + cluster_name = get_string_or_none(obj.availability_zone) + + # honor strip_host_domain_name + if cluster_name is not None and self.strip_host_domain_name is True: + cluster_name = cluster_name.split(".")[0] + + # check VM cluster + if cluster_name is None: + log.error(f"Requesting cluster for Virtual Machine '{name}' failed. Skipping.") + return + + elif self.permitted_clusters.get(cluster_name) is None: + log.debug(f"Virtual machine '{name}' is not part of a permitted cluster. Skipping") + return + + if name in self.processed_vm_names.get(cluster_name, list()): + log.warning(f"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. " + "Make sure to use unique VM names. 
Skipping") + return + + # add host to processed list + if self.processed_vm_names.get(cluster_name) is None: + self.processed_vm_names[cluster_name] = list() + + self.processed_vm_names[cluster_name].append(name) + + # filter VMs by name + if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False: + return + + # + # Collect data + # + + # check if cluster is a Standalone ESXi + site_name = self.permitted_clusters.get(cluster_name) + if site_name is None: + site_name = self.get_site_name(NBCluster, cluster_name) + + # first check against vm_platform_relation + platform = get_string_or_none(obj.flavor["original_name"]) + + if platform is not None: + platform = self.get_object_relation(platform, "vm_platform_relation", fallback=platform) + + disk = int(obj.flavor["disk"]) + + annotation = None + if bool(self.skip_vm_comments) is False: + annotation = get_string_or_none(obj.id) + + # assign vm_tenant_relation + tenant_name = self.get_object_relation(name, "vm_tenant_relation") + + vm_data = { + "name": name, + "cluster": {"name": cluster_name}, + "status": status, + "memory": obj.flavor["ram"], + "vcpus": obj.flavor["vcpus"], + "disk": disk + } + + if platform is not None: + vm_data["platform"] = {"name": platform} + if annotation is not None: + vm_data["comments"] = annotation + if tenant_name is not None: + vm_data["tenant"] = {"name": tenant_name} + + vm_primary_ip4 = None + vm_primary_ip6 = None + vm_nic_dict = dict() + nic_ips = dict() + count = 0 + + for network, addresses in obj.addresses.items(): + count += 1 + nic_ips[network] = list() + for address in addresses: + nic_ips[network].append(address["addr"]) + if address["version"] == 4: + vm_primary_ip4 = address["addr"] + if address["version"] == 6: + vm_primary_ip6 = address["addr"] + full_name = unquote(f"vNIC{count} ({network})") + vm_nic_data = { + "name": full_name, + "virtual_machine": None, + "mac_address": normalize_mac_address(address["OS-EXT-IPS-MAC:mac_addr"]), + 
"description": full_name, + "enabled": True, + } + if ip_valid_to_add_to_netbox(address["addr"], self.permitted_subnets, full_name) is True: + vm_nic_dict[network] = vm_nic_data + + # add VM to inventory + self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=vm_nic_dict, + nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6) + + return + + def update_basic_data(self): + """ + + Returns + ------- + + """ + + # add source identification tag + self.inventory.add_update_object(NBTag, data={ + "name": self.source_tag, + "description": f"Marks objects synced from Openstack '{self.name}' " + f"({self.auth_url}) to this NetBox Instance." + }) + + # update virtual site if present + this_site_object = self.inventory.get_by_data(NBSite, data={"name": self.site_name}) + + if this_site_object is not None: + this_site_object.update(data={ + "name": self.site_name, + "comments": "A default virtual site created to house objects " + "that have been synced from this Openstack instance " + "and have no predefined site assigned." 
+ }) + + server_role_object = self.inventory.get_by_data(NBDeviceRole, data={"name": "Server"}) + + if server_role_object is not None: + server_role_object.update(data={ + "name": "Server", + "color": "9e9e9e", + "vm_role": True + }) + + +# EOF diff --git a/requirements.txt b/requirements.txt index 2651f24..9ea7cac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,4 @@ pyvmomi==7.0.3 aiodns==3.0.0 setuptools>=62.00.0 pyyaml==6.0 +openstacksdk From f6ac7e33893433518bb9343379974e139f22dc5b Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Mon, 10 Jan 2022 11:19:05 +0100 Subject: [PATCH 02/12] Fix disk sizes --- module/sources/openstack/connection.py | 39 +++++++++++++++++++------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 36e13e9..645538a 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -84,7 +84,7 @@ class OpenStackHandler(SourceBase): "region": None, "user_domain": None, "project_domain": None, - "group_name": "Openstack" + "group_name": "Openstack", "validate_tls_certs": False, "cluster_exclude_filter": None, "cluster_include_filter": None, @@ -163,6 +163,7 @@ def __init__(self, name=None, settings=None, inventory=None): self.init_successful = True self.permitted_clusters = dict() self.cluster_host_map = dict() + self.volume_map = dict() self.processed_host_names = dict() self.processed_vm_names = dict() self.processed_vm_uuid = list() @@ -354,14 +355,18 @@ def apply(self): for availability_zone in availability_zones: self.add_cluster(availability_zone) - servers = self.session.compute.servers(details=True, all_projects=True) - for server in servers: - self.add_virtual_machine(server) - hypervisors = self.session.compute.hypervisors(details=True) for hypervisor in hypervisors: self.add_host(hypervisor) + volumes = self.session.block_storage.volumes(details=True, all_projects=True) + for volume 
in volumes: + self.add_volume(volume) + + servers = self.session.compute.servers(details=True, all_projects=True) + for server in servers: + self.add_virtual_machine(server) + self.update_basic_data() @staticmethod @@ -951,7 +956,7 @@ def add_host(self, obj): if log.level == DEBUG3: try: log.info("Cluster data") - dump(obj.service_details) + dump(obj.service_details.to_dict()) except Exception as e: log.error(e) @@ -1043,9 +1048,19 @@ def add_host(self, obj): return + def add_volume(self, obj): + """ + Parse OpenStack volume and store it in a map. + """ + + id = obj.id + size = obj.size + + self.volume_map[id] = size + def add_virtual_machine(self, obj): """ - Parse a OpenStack VM add to NetBox once all data is gathered. + Parse a OpenStack VM add to NetBox once all data is gathered. Parameters ---------- obj: openstack.compute.v2.server.Server virtual machine object to parse @@ -1109,7 +1124,11 @@ def add_virtual_machine(self, obj): if platform is not None: platform = self.get_object_relation(platform, "vm_platform_relation", fallback=platform) - disk = int(obj.flavor["disk"]) + disk = 0 + for volume in obj.attached_volumes: + volid = volume["id"] + size = self.volume_map[volid] + disk += int(size) annotation = None if bool(self.skip_vm_comments) is False: @@ -1145,9 +1164,9 @@ def add_virtual_machine(self, obj): nic_ips[network] = list() for address in addresses: nic_ips[network].append(address["addr"]) - if address["version"] == 4: + if int(address["version"]) == 4: vm_primary_ip4 = address["addr"] - if address["version"] == 6: + if int(address["version"]) == 6: vm_primary_ip6 = address["addr"] full_name = unquote(f"vNIC{count} ({network})") vm_nic_data = { From 14571f1cc60cbd3d2baef388f6ea13d22a997df0 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Thu, 5 May 2022 11:22:57 +0200 Subject: [PATCH 03/12] Code cleanup + fixes and docs --- README.md | 1 + module/sources/openstack/connection.py | 57 ++--------- settings-example.ini | 134 +++++++++++++++++++++++++ 3 files changed, 146 insertions(+), 46 deletions(-) diff --git 
a/README.md b/README.md index fa84074..d936b31 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ This ensures stale objects are removed from NetBox keeping an accurate current s * aiodns==2.0.0 * setuptools>=62.00.0 * pyyaml==6.0 +* openstack ### Environment * NetBox >= 2.9 diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 645538a..4a42f24 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -85,16 +85,13 @@ class OpenStackHandler(SourceBase): "user_domain": None, "project_domain": None, "group_name": "Openstack", - "validate_tls_certs": False, + "permitted_subnets": None, "cluster_exclude_filter": None, "cluster_include_filter": None, "host_exclude_filter": None, "host_include_filter": None, "vm_exclude_filter": None, "vm_include_filter": None, - "permitted_subnets": None, - "collect_hardware_asset_tag": True, - "match_host_by_serial": True, "cluster_site_relation": None, "cluster_tag_relation": None, "cluster_tenant_relation": None, @@ -108,14 +105,12 @@ class OpenStackHandler(SourceBase): "vm_tenant_relation": None, "dns_name_lookup": False, "custom_dns_servers": None, + "validate_tls_certs": False, "set_primary_ip": "when-undefined", + "skip_vm_platform": False, "skip_vm_comments": False, - "skip_vm_templates": True, "strip_host_domain_name": False, - "strip_vm_domain_name": False, - "sync_tags": False, - "sync_parent_tags": False, - "sync_custom_attributes": False + "strip_vm_domain_name": False } deprecated_settings = {} @@ -652,8 +647,7 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v Try to find object first based on the object data, interface MAC addresses and primary IPs. 1. try to find by name and cluster/site 2. try to find by mac addresses interfaces - 3. try to find by serial number (1st) or asset tag (2nd) (ESXi host) - 4. try to find by primary IP + 3. 
try to find by primary IP IP addresses for each interface are added here as well. First they will be checked and added if all checks pass. For each IP address a matching IP prefix will be searched for. First we @@ -742,21 +736,6 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v device_vm_object = self.get_object_based_on_macs(object_type, nic_macs) - # look for devices with same serial or asset tag - if object_type == NBDevice: - - if device_vm_object is None and object_data.get("serial") is not None and \ - bool(self.match_host_by_serial) is True: - log.debug2(f"No match found. Trying to find {object_type.name} based on serial number") - - device_vm_object = self.inventory.get_by_data(object_type, data={"serial": object_data.get("serial")}) - - if device_vm_object is None and object_data.get("asset_tag") is not None: - log.debug2(f"No match found. Trying to find {object_type.name} based on asset tag") - - device_vm_object = self.inventory.get_by_data(object_type, - data={"asset_tag": object_data.get("asset_tag")}) - if device_vm_object is not None: log.debug2("Found a matching %s object: %s" % (object_type.name, device_vm_object.get_display_name(including_second_key=True))) @@ -919,13 +898,8 @@ def add_host(self, obj): does the host pass the host_include_filter and host_exclude_filter Then all necessary host data will be collected. - host model, manufacturer, serial, physical interfaces, virtual interfaces, - virtual switches, proxy switches, host port groups, interface VLANs, IP addresses - Primary IPv4/6 will be determined by - 1. if the interface port group name contains - "management" or "mngt" - 2. 
interface is the default route of this host + Primary IPv4/6 will be determined by 'host_ip' value Parameters ---------- @@ -1005,9 +979,6 @@ def add_host(self, obj): if get_string_or_none(obj.status) == "enabled": status = "active" - # add asset tag if desired and present - asset_tag = None - # get host_tenant_relation tenant_name = self.get_object_relation(name, "host_tenant_relation") @@ -1029,8 +1000,6 @@ def add_host(self, obj): } # add data if present - if asset_tag is not None: - host_data["asset_tag"] = asset_tag if platform is not None: host_data["platform"] = {"name": platform} if tenant_name is not None: @@ -1078,7 +1047,6 @@ def add_virtual_machine(self, obj): # get VM power state status = "active" if get_string_or_none(obj.status) == "ACTIVE" else "offline" - # hypervisor_name = get_string_or_none(obj.hypervisor_hostname) cluster_name = get_string_or_none(obj.availability_zone) # honor strip_host_domain_name @@ -1113,16 +1081,13 @@ def add_virtual_machine(self, obj): # Collect data # - # check if cluster is a Standalone ESXi - site_name = self.permitted_clusters.get(cluster_name) - if site_name is None: - site_name = self.get_site_name(NBCluster, cluster_name) - # first check against vm_platform_relation - platform = get_string_or_none(obj.flavor["original_name"]) + platform = None + if bool(self.skip_vm_platform) is False: + platform = get_string_or_none(obj.flavor["original_name"]) - if platform is not None: - platform = self.get_object_relation(platform, "vm_platform_relation", fallback=platform) + if platform is not None: + platform = self.get_object_relation(platform, "vm_platform_relation", fallback=platform) disk = 0 for volume in obj.attached_volumes: diff --git a/settings-example.ini b/settings-example.ini index 29618a2..47d1e50 100644 --- a/settings-example.ini +++ b/settings-example.ini @@ -403,4 +403,138 @@ inventory_file_path = /full/path/to/inventory/files ; If the device has a tenant then this one will be used. 
If not, the prefix tenant will be used if defined ;ip_tenant_inheritance_order = device, prefix + +[source/my-openstack-example] + +# Defines if this source is enabled or not +#enabled = true + +# type of source. This defines which source handler to use. +type = openstack + +# URL to the Openstack API +auth_url = https://api.openstack-instance.local:5000/v3 + +# Openstack project to connect to +project = MyProject + +# Openstack region +region = RegionOne + +# Openstack user_domain +user_domain = Default + +# Openstack project_domain +project_domain = Default + +# Enforces TLS certificate validation. If Openstack API uses a valid TLS certificate then +# this option should be set to 'true' to ensure a secure connection. +#validate_tls_certs = false + +# username and password to use to log into vCenter +username = vcenteruser +password = supersecret + +# IP networks eligible to be synced to NetBox. +# If an IP address is not part of this networks then it WON'T be synced to NetBox +permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8 + +# filters can be used to include/exclude certain objects from importing into NetBox +# Include filters are checked first and exclude filters after. An object name has to +# pass both filters to be synced to NetBox. If a filter is unset it will be ignored. +# Filters are all treated as regex expressions! + +# If a cluster is excluded from sync then ALL VMs and HOSTS inside the cluster will +# be ignored! +#cluster_exclude_filter = +#cluster_include_filter = + +# This will only include/exclude the host, not the VM if Host is part of a multi host +# cluster. +#host_exclude_filter = +#host_include_filter = + +# simply include/exclude VMs +#vm_exclude_filter = +#vm_include_filter = + +# This option defines which Openstack Availability Zones is part of a NetBox site. This is done +# with a comma separated key = value list. 
+# key: defines the cluster name as regex +# value: defines the NetBox site name (use quotes if name contains commas) +# This is a quite important config setting as IP addresses, prefixes, VLANs and +# VRFs are site dependent. In order to assign the correct prefix to an IP +# address it is important to pick the correct site. +# A VM always depends on the cluster site relation +#cluster_site_relation = Cluster_NYC = New York , Cluster_FFM.* = Frankfurt + +# Same as cluster site but on host level. If unset it will fall back +# to cluster_site_relation. +#host_site_relation = nyc02.* = New York, ffm01.* = Frankfurt + +# This option defines which cluster/host/VM belongs to which tenant. This is done +# with a comma separated key = value list. +# key: defines a hosts/VM name as regex +# value: defines the NetBox tenant name (use quotes if name contains commas) +#cluster_tenant_relation = Cluster_NYC.* = Customer A +#host_tenant_relation = esxi300.* = Infrastructure +#vm_tenant_relation = grafana.* = Infrastructure + +# This option defines custom platforms if the used Flavors are not suitable. +# Pretty much a mapping of Openstack flavor name to your own platform name. +# This is done with a comma separated key = value list. +# key: defines a Openstack returned flavor name +# value: defines the desired NetBox platform name +#vm_platform_relation = centos-7.* = centos7, microsoft-windows-server-2016.* = Windows2016 + +# Define the NetBox device role used for hosts and VMs. The default is set to "Server". This is done +# with a comma separated key = value list. +# key: defines a hosts/VM name as regex +# value: defines the NetBox role name (use quotes if name contains commas) +#host_role_relation = .* = Server +#vm_role_relation = .* = Server + +# Define NetBox tags which are assigned to a cluster, host or VM. This is done +# with a comma separated key = value list. 
+# key: defines a hosts/VM name as regex +# value: defines the NetBox tag (use quotes if name contains commas) +#cluster_tag_relation = Cluster_NYC.* = Infrastructure +#host_tag_relation = esxi300.* = Infrastructure +#vm_tag_relation = grafana.* = Infrastructure + +# Perform a reverse lookup for all collected IP addresses. If a dns name +# was found it will be added to the IP address object in NetBox +#dns_name_lookup = True + +# use custom DNS server to do the reverse lookups +#custom_dns_servers = 192.168.1.11, 192.168.1.12 + +# define how the primary IPs should be set +# possible values +# +# always: will remove primary IP from the object where this address is +# currently set as primary and moves it to new object +# +# when-undefined: (default) +# only sets primary IP if undefined, will cause ERRORs if same IP is +# assigned more then once to different hosts and IP is set as the +# objects primary IP +# +# never: don't set any primary IPs, will cause the same ERRORs +# as "when-undefined" + +#set_primary_ip = when-undefined + +# Do not sync flavors from a VM in Openstack to the comments field on a VM in netbox +#skip_vm_platform = False + +# Do not sync ID from a VM in Openstack to the comments field on a VM in netbox +#skip_vm_comments = False + +# strip domain part from host name before syncing device to NetBox +#strip_host_domain_name = False + +# strip domain part from VM name before syncing VM to NetBox +#strip_vm_domain_name = False + ;EOF From 8936dcf258c465369dea59c94a929974d31d8a62 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Thu, 12 May 2022 12:54:14 +0200 Subject: [PATCH 04/12] Add host_ip interface --- module/sources/openstack/connection.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 4a42f24..dd3dee4 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -1007,12 
+1007,25 @@ def add_host(self, obj): if len(host_tags) > 0: host_data["tags"] = host_tags + pnic_data_dict = dict() + pnic_data = { + "name": "eth0", + "device": None, + "enabled": True, + "type": "10gbase-t" + } + pnic_data_dict["eth0"] = pnic_data + + vnic_ips = dict() + vnic_ips["eth0"] = list() + vnic_ips["eth0"].append(obj.host_ip) + host_primary_ip4 = obj.host_ip host_primary_ip6 = None # add host to inventory - self.add_device_vm_to_inventory(NBDevice, object_data=host_data, pnic_data=dict(), - vnic_data=dict(), nic_ips=None, + self.add_device_vm_to_inventory(NBDevice, object_data=host_data, pnic_data=pnic_data_dict, + vnic_data=dict(), nic_ips=vnic_ips, p_ipv4=host_primary_ip4, p_ipv6=host_primary_ip6) return From 57fb6553cc60255e812161bdc1ec1ea014ed293a Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Fri, 13 May 2022 13:11:19 +0200 Subject: [PATCH 05/12] Fix setting primary IP --- module/sources/openstack/connection.py | 29 ++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index dd3dee4..6e64dab 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -1016,11 +1016,18 @@ def add_host(self, obj): } pnic_data_dict["eth0"] = pnic_data + ip_addr = obj.host_ip + prefix = None + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_interface(ip_addr)) + if matched_prefix is not None: + prefix = matched_prefix.data["prefix"].prefixlen + ip_addr = f"{ip_addr}/{prefix}" + vnic_ips = dict() vnic_ips["eth0"] = list() - vnic_ips["eth0"].append(obj.host_ip) + vnic_ips["eth0"].append(ip_addr) - host_primary_ip4 = obj.host_ip + host_primary_ip4 = ip_addr host_primary_ip6 = None # add host to inventory @@ -1141,11 +1148,19 @@ def add_virtual_machine(self, obj): count += 1 nic_ips[network] = list() for address in addresses: - nic_ips[network].append(address["addr"]) + ip_addr = address["addr"] + 
prefix = None + + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_interface(ip_addr)) + if matched_prefix is not None: + prefix = matched_prefix.data["prefix"].prefixlen + ip_addr = f"{ip_addr}/{prefix}" + + nic_ips[network].append(ip_addr) if int(address["version"]) == 4: - vm_primary_ip4 = address["addr"] + vm_primary_ip4 = ip_addr if int(address["version"]) == 6: - vm_primary_ip6 = address["addr"] + vm_primary_ip6 = ip_addr full_name = unquote(f"vNIC{count} ({network})") vm_nic_data = { "name": full_name, @@ -1154,8 +1169,10 @@ def add_virtual_machine(self, obj): "description": full_name, "enabled": True, } - if ip_valid_to_add_to_netbox(address["addr"], self.permitted_subnets, full_name) is True: + if ip_valid_to_add_to_netbox(ip_addr, self.permitted_subnets, full_name) is True: vm_nic_dict[network] = vm_nic_data + else: + log.debug(f"Virtual machine '{name}' address '{ip_addr}' is not valid to add. Skipping") # add VM to inventory self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=vm_nic_dict, From 72509fbe7df8638d8805305750fb010c11aa13d4 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Tue, 17 May 2022 14:38:10 +0200 Subject: [PATCH 06/12] Fix matching when MAC is None --- module/sources/openstack/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 6e64dab..6cc8d26 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -732,7 +732,7 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v # on VMs vnic data is used, on physical devices pnic data is used mac_source_data = vnic_data if object_type == NBVM else pnic_data - nic_macs = [x.get("mac_address") for x in mac_source_data.values()] + nic_macs = [x.get("mac_address") for x in mac_source_data.values() if x.get("mac_address") is not None] device_vm_object = 
self.get_object_based_on_macs(object_type, nic_macs) From dd4976406a4121319325c2ea84421905af91b1f8 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Wed, 23 Aug 2023 15:52:01 +0200 Subject: [PATCH 07/12] Adapt OpenStack module to latest netbox-sync version --- module/sources/openstack/config.py | 377 ++++++++++++++++++++++ module/sources/openstack/connection.py | 428 +++++++++++-------------- settings-example.ini | 8 +- 3 files changed, 571 insertions(+), 242 deletions(-) create mode 100644 module/sources/openstack/config.py diff --git a/module/sources/openstack/config.py b/module/sources/openstack/config.py new file mode 100644 index 0000000..e5f5039 --- /dev/null +++ b/module/sources/openstack/config.py @@ -0,0 +1,377 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 - 2023 Ricardo Bartels. All rights reserved. +# +# netbox-sync.py +# +# This work is licensed under the terms of the MIT license. +# For a copy, see file LICENSE.txt included in this +# repository or visit: . + +import re +from ipaddress import ip_address + +from module.common.misc import quoted_split +from module.config import source_config_section_name +from module.config.base import ConfigBase +from module.config.option import ConfigOption +from module.config.group import ConfigOptionGroup +from module.sources.common.conifg import * +from module.sources.common.permitted_subnets import PermittedSubnets +from module.common.logging import get_logger +from module.common.support import normalize_mac_address + +log = get_logger() + + +class OpenStackConfig(ConfigBase): + + section_name = source_config_section_name + source_name = None + source_name_example = "my-openstack-example" + + def __init__(self): + self.options = [ + ConfigOption(**config_option_enabled_definition), + + ConfigOption(**{**config_option_type_definition, "config_example": "openstack"}), + + ConfigOption("auth_url", + str, + description="host name / IP address of the OpenStack", + 
config_example="https://api.openstack-instance.local:5000/v3", + mandatory=True), + + ConfigOption("project", + str, + description="Name of the OpenStack project", + config_example="MyProject", + mandatory=True), + + ConfigOption("username", + str, + description="username to use to log into OpenStack", + config_example="openstackuser", + mandatory=True), + + ConfigOption("password", + str, + description="password to use to log into OpenStack", + config_example="supersecret", + sensitive=True, + mandatory=True), + + ConfigOption("region", + str, + description="Which OpenStack region to use", + config_example="RegionOne", + mandatory=True), + + ConfigOption("user_domain", + str, + description="Which OpenStack user domain to use", + config_example="Default", + mandatory=True), + + ConfigOption("project_domain", + str, + description="Which OpenStack project domain to use", + config_example="Default", + mandatory=True), + + ConfigOption("group_name", + str, + description="OpenStack group name", + default_value="Openstack"), + + ConfigOption("validate_tls_certs", + bool, + description="""Enforces TLS certificate validation. + If vCenter uses a valid TLS certificate then this option should be set + to 'true' to ensure a secure connection.""", + default_value=False), + + ConfigOption(**config_option_permitted_subnets_definition), + + ConfigOptionGroup(title="filter", + description="""filters can be used to include/exclude certain objects from importing + into NetBox. Include filters are checked first and exclude filters after. + An object name has to pass both filters to be synced to NetBox. + If a filter is unset it will be ignored. Filters are all treated as regex expressions! 
+ If more then one expression should match, a '|' needs to be used + """, + config_example="""Example: (exclude all VMs with "replica" in their name + and all VMs starting with "backup"): vm_exclude_filter = .*replica.*|^backup.*""", + options=[ + ConfigOption("cluster_exclude_filter", + str, + description="""If a cluster is excluded from sync then ALL VMs and HOSTS + inside the cluster will be ignored! a cluster can be specified + as "Cluster-name" or "Datacenter-name/Cluster-name" if + multiple clusters have the same name"""), + ConfigOption("cluster_include_filter", str), + ConfigOption("host_exclude_filter", + str, + description="""This will only include/exclude the host, + not the VM if Host is part of a multi host cluster"""), + ConfigOption("host_include_filter", str), + ConfigOption("vm_exclude_filter", + str, description="simply include/exclude VMs"), + ConfigOption("vm_include_filter", str) + ]), + ConfigOptionGroup(title="relations", + options=[ + ConfigOption("cluster_site_relation", + str, + description="""\ + This option defines which vCenter cluster is part of a NetBox site. + This is done with a comma separated key = value list. + key: defines the cluster name as regex + value: defines the NetBox site name (use quotes if name contains commas) + This is a quite important config setting as IP addresses, prefixes, VLANs + and VRFs are site dependent. In order to assign the correct prefix to an IP + address it is important to pick the correct site. + A VM always depends on the cluster site relation + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name + """, + config_example="Cluster_NYC = New York, Cluster_FFM.* = Frankfurt, Datacenter_TOKIO/.* = Tokio"), + ConfigOption("host_site_relation", + str, + description="""Same as cluster site but on host level. 
+ If unset it will fall back to cluster_site_relation""", + config_example="nyc02.* = New York, ffm01.* = Frankfurt"), + ConfigOption("cluster_tenant_relation", + str, + description="""\ + This option defines which cluster/host/VM belongs to which tenant. + This is done with a comma separated key = value list. + key: defines a hosts/VM name as regex + value: defines the NetBox tenant name (use quotes if name contains commas) + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name + """, + config_example="Cluster_NYC.* = Customer A"), + ConfigOption("host_tenant_relation", str, config_example="esxi300.* = Infrastructure"), + ConfigOption("vm_tenant_relation", str, config_example="grafana.* = Infrastructure"), + ConfigOption("vm_platform_relation", + str, + description="""\ + This option defines custom platforms if the VMWare created platforms are not suitable. + Pretty much a mapping of VMWare platform name to your own platform name. + This is done with a comma separated key = value list. + key: defines a VMWare returned platform name + value: defines the desired NetBox platform name""", + config_example="centos-7.* = centos7, microsoft-windows-server-2016.* = Windows2016"), + ConfigOption("host_role_relation", + str, + description="""\ + Define the NetBox device role used for hosts. The default is + set to "Server". This is done with a comma separated key = value list. + key: defines host(s) name as regex + value: defines the NetBox role name (use quotes if name contains commas) + """, + default_value=".* = Server"), + ConfigOption("vm_role_relation", + str, + description="""\ + Define the NetBox device role used for VMs. This is done with a + comma separated key = value list, same as 'host_role_relation'. 
+ key: defines VM(s) name as regex + value: defines the NetBox role name (use quotes if name contains commas) + """, + config_example=".* = Server"), + ConfigOption("cluster_tag_relation", + str, + description="""\ + Define NetBox tags which are assigned to a cluster, host or VM. This is + done with a comma separated key = value list. + key: defines a hosts/VM name as regex + value: defines the NetBox tag (use quotes if name contains commas) + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name""", + config_example="Cluster_NYC.* = Infrastructure"), + ConfigOption("host_tag_relation", str, config_example="esxi300.* = Infrastructure"), + ConfigOption("vm_tag_relation", str, config_example="grafana.* = Infrastructure") + ]), + ConfigOption("dns_name_lookup", + bool, + description="""Perform a reverse lookup for all collected IP addresses. + If a dns name was found it will be added to the IP address object in NetBox + """, + default_value=True), + ConfigOption("custom_dns_servers", + str, + description="use custom DNS server to do the reverse lookups", + config_example="192.168.1.11, 192.168.1.12"), + ConfigOption("set_primary_ip", + str, + description="""\ + define how the primary IPs should be set + possible values: + + always: will remove primary IP from the object where this address is + currently set as primary and moves it to new object + + when-undefined: + only sets primary IP if undefined, will cause ERRORs if same IP is + assigned more then once to different hosts and IP is set as the + objects primary IP + + never: don't set any primary IPs, will cause the same ERRORs + as "when-undefined" + """, + default_value="when-undefined"), + ConfigOption("skip_vm_comments", + bool, + description="do not set notes to the UUID or name of the VM", + default_value=False), + ConfigOption("skip_vm_platform", + bool, + description="do not sync flavors from a VM in Openstack to the comments field on a VM 
in netbox", + default_value=False), + ConfigOption("strip_host_domain_name", + bool, + description="strip domain part from host name before syncing device to NetBox", + default_value=False), + ConfigOption("strip_vm_domain_name", + bool, + description="strip domain part from VM name before syncing VM to NetBox", + default_value=False), + ConfigOptionGroup(title="custom object attributes", + description="""\ + add arbitrary host/vm object attributes as custom fields to NetBox. + multiple attributes can be defined comma separated. + to get a list of available attributes use '-l DEBUG3' as cli param (CAREFUL: output might be long) + and here 'https://gist.github.com/bb-Ricardo/538768487bdac4efafabe56e005cb4ef' can be seen how to + access these attributes + """, + options=[ + ConfigOption("host_custom_object_attributes", + str, + config_example="uuid"), + ConfigOption("vm_custom_object_attributes", + str, + config_example="uuid") + ]), + ConfigOption("set_vm_name_to_uuid", + bool, + description="Set the name in Netbox to the VM UUID instead of name", + default_value=False), + + # removed settings + ConfigOption("netbox_host_device_role", + str, + deprecation_message="You need to switch to 'host_role_relation'.", + removed=True), + ConfigOption("netbox_vm_device_role", + str, + deprecation_message="You need to switch to 'vm_role_relation'.", + removed=True), + ConfigOption("sync_tags", + bool, + deprecation_message="You need to switch to 'host_tag_source', " + + "'vm_tag_source' or 'cluster_tag_source'", + removed=True), + ConfigOption("sync_parent_tags", + bool, + deprecation_message="You need to switch to 'host_tag_source', " + + "'vm_tag_source' or 'cluster_tag_source'", + removed=True) + ] + + super().__init__() + + def validate_options(self): + + for option in self.options: + + if option.value is None: + continue + + if "filter" in option.key: + + re_compiled = None + try: + re_compiled = re.compile(option.value) + except Exception as e: + log.error(f"Problem 
parsing regular expression for '{self.source_name}.{option.key}': {e}") + self.set_validation_failed() + + option.set_value(re_compiled) + + continue + + if "relation" in option.key: + + relation_data = list() + + relation_type = option.key.split("_")[1] + + for relation in quoted_split(option.value): + + object_name = relation.split("=")[0].strip(' "') + relation_name = relation.split("=")[1].strip(' "') + + if len(object_name) == 0 or len(relation_name) == 0: + log.error(f"Config option '{relation}' malformed got '{object_name}' for " + f"object name and '{relation_name}' for {relation_type} name.") + self.set_validation_failed() + continue + + try: + re_compiled = re.compile(object_name) + except Exception as e: + log.error(f"Problem parsing regular expression '{object_name}' for '{relation}': {e}") + self.set_validation_failed() + continue + + relation_data.append({ + "object_regex": re_compiled, + "assigned_name": relation_name + }) + + option.set_value(relation_data) + + continue + + if "custom_object_attributes" in option.key: + + option.set_value(quoted_split(option.value)) + + continue + + if option.key == "set_primary_ip": + if option.value not in ["always", "when-undefined", "never"]: + log.error(f"Primary IP option '{option.key}' value '{option.value}' invalid.") + self.set_validation_failed() + + if option.key == "custom_dns_servers": + + dns_name_lookup = self.get_option_by_name("dns_name_lookup") + + if not isinstance(dns_name_lookup, ConfigOption) or dns_name_lookup.value is False: + continue + + custom_dns_servers = quoted_split(option.value) + + tested_custom_dns_servers = list() + for custom_dns_server in custom_dns_servers: + try: + tested_custom_dns_servers.append(str(ip_address(custom_dns_server))) + except ValueError: + log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' " + f"does not appear to be an IP address.") + self.set_validation_failed() + + option.set_value(tested_custom_dns_servers) + + continue + + 
permitted_subnets_option = self.get_option_by_name("permitted_subnets") + + if permitted_subnets_option is not None: + permitted_subnets = PermittedSubnets(permitted_subnets_option.value) + if permitted_subnets.validation_failed is True: + self.set_validation_failed() + + permitted_subnets_option.set_value(permitted_subnets) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 6cc8d26..666fe82 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -8,41 +8,21 @@ # repository or visit: . import pprint -import re import ssl -from ipaddress import ip_address, ip_network, ip_interface +from ipaddress import ip_interface from socket import gaierror from urllib.parse import unquote +from packaging import version import openstack from module.sources.common.source_base import SourceBase +from module.sources.openstack.config import OpenStackConfig from module.common.logging import get_logger, DEBUG3 from module.common.misc import grab, dump, get_string_or_none -from module.common.support import normalize_mac_address, ip_valid_to_add_to_netbox -from module.netbox.object_classes import ( - NetBoxObject, - NetBoxInterfaceType, - NBTag, - NBManufacturer, - NBDeviceType, - NBPlatform, - NBClusterType, - NBClusterGroup, - NBDeviceRole, - NBSite, - NBCluster, - NBDevice, - NBVM, - NBVMInterface, - NBInterface, - NBIPAddress, - NBPrefix, - NBTenant, - NBVRF, - NBVLAN, - NBCustomField -) +from module.common.support import normalize_mac_address +from module.netbox.inventory import NetBoxInventory +from module.netbox import * log = get_logger() @@ -75,55 +55,6 @@ class OpenStackHandler(SourceBase): NBCustomField ] - settings = { - "enabled": True, - "auth_url": None, - "project": None, - "username": None, - "password": None, - "region": None, - "user_domain": None, - "project_domain": None, - "group_name": "Openstack", - "permitted_subnets": None, - "cluster_exclude_filter": None, - 
"cluster_include_filter": None, - "host_exclude_filter": None, - "host_include_filter": None, - "vm_exclude_filter": None, - "vm_include_filter": None, - "cluster_site_relation": None, - "cluster_tag_relation": None, - "cluster_tenant_relation": None, - "host_role_relation": None, - "host_site_relation": None, - "host_tag_relation": None, - "host_tenant_relation": None, - "vm_platform_relation": None, - "vm_role_relation": None, - "vm_tag_relation": None, - "vm_tenant_relation": None, - "dns_name_lookup": False, - "custom_dns_servers": None, - "validate_tls_certs": False, - "set_primary_ip": "when-undefined", - "skip_vm_platform": False, - "skip_vm_comments": False, - "strip_host_domain_name": False, - "strip_vm_domain_name": False - } - - deprecated_settings = {} - - removed_settings = { - "netbox_host_device_role": "host_role_relation", - "netbox_vm_device_role": "vm_role_relation" - } - - init_successful = False - inventory = None - name = None - source_tag = None source_type = "openstack" # internal vars @@ -132,20 +63,23 @@ class OpenStackHandler(SourceBase): site_name = None - def __init__(self, name=None, settings=None, inventory=None): + def __init__(self, name=None): if name is None: raise ValueError(f"Invalid value for attribute 'name': '{name}'.") - self.inventory = inventory + self.inventory = NetBoxInventory() self.name = name - self.parse_config_settings(settings) + # parse settings + settings_handler = OpenStackConfig() + settings_handler.source_name = self.name + self.settings = settings_handler.parse() - self.source_tag = f"Source: {name}" + self.set_source_tag() self.site_name = f"OpenStack: {name}" - if self.enabled is False: + if self.settings.enabled is False: log.info(f"Source '{name}' is currently disabled. 
Skipping") return @@ -162,117 +96,9 @@ def __init__(self, name=None, settings=None, inventory=None): self.processed_host_names = dict() self.processed_vm_names = dict() self.processed_vm_uuid = list() + self.object_cache = dict() self.parsing_vms_the_first_time = True - def parse_config_settings(self, config_settings): - """ - Validate parsed settings from config file - - Parameters - ---------- - config_settings: dict - dict of config settings - - """ - - validation_failed = False - - for setting in ["auth_url", "project", "username", "password", "region", "user_domain", "project_domain"]: - if config_settings.get(setting) is None: - log.error(f"Config option '{setting}' in 'source/{self.name}' can't be empty/undefined") - validation_failed = True - - # check permitted ip subnets - if config_settings.get("permitted_subnets") is None: - log.info(f"Config option 'permitted_subnets' in 'source/{self.name}' is undefined. " - f"No IP addresses will be populated to NetBox!") - else: - config_settings["permitted_subnets"] = \ - [x.strip() for x in config_settings.get("permitted_subnets").split(",") if x.strip() != ""] - - permitted_subnets = list() - for permitted_subnet in config_settings["permitted_subnets"]: - try: - permitted_subnets.append(ip_network(permitted_subnet)) - except Exception as e: - log.error(f"Problem parsing permitted subnet: {e}") - validation_failed = True - - config_settings["permitted_subnets"] = permitted_subnets - - # check include and exclude filter expressions - for setting in [x for x in config_settings.keys() if "filter" in x]: - if config_settings.get(setting) is None or config_settings.get(setting).strip() == "": - continue - - re_compiled = None - try: - re_compiled = re.compile(config_settings.get(setting)) - except Exception as e: - log.error(f"Problem parsing regular expression for '{setting}': {e}") - validation_failed = True - - config_settings[setting] = re_compiled - - for relation_option in [x for x in self.settings.keys() if 
"relation" in x]: - - if config_settings.get(relation_option) is None: - continue - - relation_data = list() - - relation_type = relation_option.split("_")[1] - - # obey quotations to be able to add names including a comma - # thanks to: https://stackoverflow.com/a/64333329 - for relation in re.split(r",(?=(?:[^\"']*[\"'][^\"']*[\"'])*[^\"']*$)", - config_settings.get(relation_option)): - - object_name = relation.split("=")[0].strip(' "') - relation_name = relation.split("=")[1].strip(' "') - - if len(object_name) == 0 or len(relation_name) == 0: - log.error(f"Config option '{relation}' malformed got '{object_name}' for " - f"object name and '{relation_name}' for {relation_type} name.") - validation_failed = True - - try: - re_compiled = re.compile(object_name) - except Exception as e: - log.error(f"Problem parsing regular expression '{object_name}' for '{relation}': {e}") - validation_failed = True - continue - - relation_data.append({ - "object_regex": re_compiled, - "assigned_name": relation_name - }) - - config_settings[relation_option] = relation_data - - if config_settings.get("dns_name_lookup") is True and config_settings.get("custom_dns_servers") is not None: - - custom_dns_servers = \ - [x.strip() for x in config_settings.get("custom_dns_servers").split(",") if x.strip() != ""] - - tested_custom_dns_servers = list() - for custom_dns_server in custom_dns_servers: - try: - tested_custom_dns_servers.append(str(ip_address(custom_dns_server))) - except ValueError: - log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' " - f"does not appear to be an IP address.") - validation_failed = True - - config_settings["custom_dns_servers"] = tested_custom_dns_servers - - if validation_failed is True: - log.error("Config validation failed. 
Exit!") - exit(1) - - for setting in self.settings.keys(): - setattr(self, setting, config_settings.get(setting)) - def create_openstack_session(self): """ Initialize session with OpenStack @@ -285,37 +111,37 @@ def create_openstack_session(self): if self.session is not None: return True - log.debug(f"Starting OpenStack connection to '{self.auth_url}'") + log.debug(f"Starting OpenStack connection to '{self.settings.auth_url}'") ssl_context = ssl.create_default_context() - if bool(self.validate_tls_certs) is False: + if self.settings.validate_tls_certs is False: ssl_context.check_hostname = False ssl_context.verify_mode = ssl.CERT_NONE try: self.session = openstack.connect( - auth_url=self.auth_url, - project_name=self.project, - username=self.username, - password=self.password, - region_name=self.region, - user_domain_name=self.user_domain, - project_domain_name=self.project_domain, + auth_url=self.settings.auth_url, + project_name=self.settings.project, + username=self.settings.username, + password=self.settings.password, + region_name=self.settings.region, + user_domain_name=self.settings.user_domain, + project_domain_name=self.settings.project_domain, app_name='netbox-sync', app_version='0.1', ) except (gaierror, OSError) as e: log.error( - f"Unable to connect to OpenStack instance '{self.auth_url}' on port {self.port}. " + f"Unable to connect to OpenStack instance '{self.settings.auth_url}' on port {self.settings.port}. " f"Reason: {e}" ) return False except Exception as e: - log.error(f"Unable to connect to OpenStack instance '{self.auth_url}' on port {self.port}. {e.msg}") + log.error(f"Unable to connect to OpenStack instance '{self.settings.auth_url}' on port {self.settings.port}. 
{e.msg}") return False - log.info(f"Successfully connected to OpenStack '{self.auth_url}'") + log.info(f"Successfully connected to OpenStack '{self.settings.auth_url}'") return True @@ -327,7 +153,7 @@ def apply(self): Every update of new/existing objects fot this source has to happen here. """ - log.info(f"Query data from OpenStack: '{self.auth_url}'") + log.info(f"Query data from OpenStack: '{self.settings.auth_url}'") """ Mapping of object type keywords to view types and handlers @@ -343,9 +169,10 @@ def apply(self): where you create the same machines with a different setup like a new version or something. This way NetBox will be updated primarily with the actual active VM data. - """ + self.inventory.add_update_object(NBClusterGroup, data={"name": self.settings.group_name}, source=self) + availability_zones = self.session.compute.availability_zones(details=True) for availability_zone in availability_zones: self.add_cluster(availability_zone) @@ -425,9 +252,9 @@ def get_site_name(self, object_type, object_name, cluster_name=""): site_name = self.get_object_relation(object_name, relation_name) if object_type == NBDevice and site_name is None: - site_name = self.permitted_clusters.get(cluster_name) or \ - self.get_site_name(NBCluster, object_name, cluster_name) - log.debug2(f"Found a matching cluster site for {object_name}, using site '{site_name}'") + site_name = self.get_site_name(NBCluster, cluster_name) + if site_name is not None: + log.debug2(f"Found a matching cluster site for {object_name}, using site '{site_name}'") # set default site name if site_name is None: @@ -449,7 +276,7 @@ def get_object_based_on_macs(self, object_type, mac_list=None): the highest amount of matching interfaces. If the ration of matching interfaces exceeds 2.0 then the top matching machine is chosen as desired object. - If the ration is below 2.0 then None will be returned. The probability is to low that + If the ration is below 2.0 then None will be returned. 
The probability is too low that this one is the correct one. None will also be returned if no machine was found at all. @@ -613,12 +440,25 @@ def get_object_relation(self, name, relation, fallback=None): """ resolved_list = list() - for single_relation in grab(self, relation, fallback=list()): + for single_relation in grab(self.settings, relation, fallback=list()): object_regex = single_relation.get("object_regex") + match_found = False if object_regex.match(name): resolved_name = single_relation.get("assigned_name") - log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) for {name}.") + log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) for {name}") resolved_list.append(resolved_name) + match_found = True + + # special cluster condition + if match_found is False and grab(f"{relation}".split("_"), "0") == "cluster": + + stripped_name = "/".join(name.split("/")[1:]) + if object_regex.match(stripped_name): + + resolved_name = single_relation.get("assigned_name") + log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) " + f"for {stripped_name}") + resolved_list.append(resolved_name) if grab(f"{relation}".split("_"), "1") == "tag": return resolved_list @@ -757,12 +597,13 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v # update role according to config settings object_name = object_data.get(object_type.primary_key) role_name = self.get_object_relation(object_name, - "host_role_relation" if object_type == NBDevice else "vm_role_relation", - fallback="Server") + "host_role_relation" if object_type == NBDevice else "vm_role_relation") if object_type == NBDevice: + if role_name is None: + role_name = "Server" device_vm_object.update(data={"device_role": {"name": role_name}}) - if object_type == NBVM: + if object_type == NBVM and role_name is not None: device_vm_object.update(data={"role": {"name": role_name}}) # compile all nic data into one 
dictionary @@ -801,11 +642,11 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v # add all interface IPs for ip_object in ip_address_objects: - ip_interface_object = ip_interface(grab(ip_object, "data.address")) - if ip_object is None: continue + ip_interface_object = ip_interface(grab(ip_object, "data.address")) + # continue if address is not a primary IP if ip_interface_object not in [primary_ipv4_object, primary_ipv6_object]: continue @@ -813,7 +654,7 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v # set/update/remove primary IP addresses set_this_primary_ip = False ip_version = ip_interface_object.ip.version - if self.set_primary_ip == "always": + if self.settings.set_primary_ip == "always": for object_type in [NBDevice, NBVM]: @@ -836,7 +677,8 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v set_this_primary_ip = True - elif self.set_primary_ip != "never" and grab(device_vm_object, f"data.primary_ip{ip_version}") is None: + elif self.settings.set_primary_ip != "never" and \ + grab(device_vm_object, f"data.primary_ip{ip_version}") is None: set_this_primary_ip = True if set_this_primary_ip is True: @@ -847,6 +689,38 @@ def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, v return + def add_object_to_cache(self, obj_type, key, netbox_object): + + if None in [type, key, netbox_object]: + return + + # noinspection PyBroadException + try: + vm_class_name = obj_type.__class__.__name__ + except Exception: + return + + if self.object_cache.get(vm_class_name) is None: + self.object_cache[vm_class_name] = dict() + + self.object_cache[vm_class_name][key] = netbox_object + + def get_object_from_cache(self, obj_type, key): + + if obj_type is None or key is None: + return + + # noinspection PyBroadException + try: + vm_class_name = obj_type.__class__.__name__ + except Exception: + return + + if self.object_cache.get(vm_class_name) is None: + 
return + + return self.object_cache[vm_class_name].get(key) + def add_cluster(self, obj): """ Add a OpenStack Availability Zone as a NBCluster to NetBox. Cluster name is checked against @@ -860,15 +734,18 @@ def add_cluster(self, obj): cluster to add """ - name = get_string_or_none(obj.name) - group = self.group_name + name = get_string_or_none(grab(obj, "name")) + group = self.inventory.get_by_data(NBClusterGroup, data={"name": self.settings.group_name}) if name is None or group is None: return + group_name = grab(group, "data.name") + log.debug(f"Parsing OpenStack AZ: {name}") - if self.passes_filter(name, self.cluster_include_filter, self.cluster_exclude_filter) is False: + if self.passes_filter(name, self.settings.cluster_include_filter, + self.settings.cluster_exclude_filter) is False: return site_name = self.get_site_name(NBCluster, name) @@ -876,11 +753,54 @@ def add_cluster(self, obj): data = { "name": name, "type": {"name": "Openstack"}, - "group": {"name": group}, + "group": group, "site": {"name": site_name} } - self.inventory.add_update_object(NBCluster, data=data, source=self) + # try to find cluster including cluster group + log.debug2("Trying to find a matching existing cluster") + cluster_object = None + fallback_cluster_object = None + for cluster_candidate in self.inventory.get_all_items(NBCluster): + if grab(cluster_candidate, "data.name") != name: + continue + + # try to find a cluster with matching site + if cluster_candidate.get_site_name() == site_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'site' are matching") + break + + if grab(cluster_candidate, "data.group") is not None and \ + grab(cluster_candidate, "data.group.data.name") == group_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'cluster group' are matching") + break + + if grab(cluster_candidate, "data.tenant") is not None and \ + tenant_name is not None and \ + 
grab(cluster_candidate, "data.tenant.data.name") == tenant_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'tenant' are matching") + break + + # if only the name matches and there are multiple cluster with the same name we choose the first + # cluster returned from netbox. This needs to be done to not ignore possible matches in one of + # the next iterations + if fallback_cluster_object is None: + fallback_cluster_object = cluster_candidate + + if cluster_object is None and fallback_cluster_object is not None: + log.debug2(f"Found an existing cluster where 'name' " + f"matches (NetBox id: {fallback_cluster_object.get_nb_reference()})") + cluster_object = fallback_cluster_object + + if cluster_object is not None: + cluster_object.update(data=data, source=self) + else: + cluster_object = self.inventory.add_update_object(NBCluster, data=data, source=self) + + self.add_object_to_cache(NBCluster, name, cluster_object) self.cluster_host_map[name] = list() for host in obj.hosts: @@ -907,9 +827,9 @@ def add_host(self, obj): host object to parse """ - name = get_string_or_none(obj.name) + name = get_string_or_none(grab(obj, "name")) - if name is not None and self.strip_host_domain_name is True: + if name is not None and self.settings.strip_host_domain_name is True: name = name.split(".")[0] # parse data @@ -938,6 +858,9 @@ def add_host(self, obj): log.debug(f"Host '{name}' is not part of a permitted cluster. 
Skipping") return + # get cluster object + nb_cluster_object = self.get_object_from_cache(NBCluster, cluster_name) + # get a site for this host site_name = self.get_site_name(NBDevice, name, cluster_name) @@ -953,7 +876,7 @@ def add_host(self, obj): self.processed_host_names[site_name].append(name) # filter hosts by name - if self.passes_filter(name, self.host_include_filter, self.host_exclude_filter) is False: + if self.passes_filter(name, self.settings.host_include_filter, self.settings.host_exclude_filter) is False: return # @@ -995,7 +918,7 @@ def add_host(self, obj): } }, "site": {"name": site_name}, - "cluster": {"name": cluster_name}, + "cluster": nb_cluster_object, "status": status } @@ -1057,11 +980,15 @@ def add_virtual_machine(self, obj): virtual machine object to parse """ - name = get_string_or_none(obj.name) + name = get_string_or_none(grab(obj, "name")) - if name is not None and self.strip_vm_domain_name is True: + if name is not None and self.settings.strip_vm_domain_name is True: name = name.split(".")[0] + if self.settings.set_vm_name_to_uuid: + display_name = name + name = get_string_or_none(obj.uuid) + log.debug(f"Parsing OpenStack VM: {name}") # get VM power state @@ -1070,7 +997,7 @@ def add_virtual_machine(self, obj): cluster_name = get_string_or_none(obj.availability_zone) # honor strip_host_domain_name - if cluster_name is not None and self.strip_host_domain_name is True: + if cluster_name is not None and self.settings.strip_host_domain_name is True: cluster_name = cluster_name.split(".")[0] # check VM cluster @@ -1082,6 +1009,8 @@ def add_virtual_machine(self, obj): log.debug(f"Virtual machine '{name}' is not part of a permitted cluster. Skipping") return + nb_cluster_object = self.get_object_from_cache(NBCluster, cluster_name) + if name in self.processed_vm_names.get(cluster_name, list()): log.warning(f"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. " "Make sure to use unique VM names. 
Skipping") @@ -1094,16 +1023,17 @@ def add_virtual_machine(self, obj): self.processed_vm_names[cluster_name].append(name) # filter VMs by name - if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False: + if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False: return # # Collect data # + site_name = self.get_site_name(NBCluster, cluster_name) # first check against vm_platform_relation platform = None - if bool(self.skip_vm_platform) is False: + if self.settings.skip_vm_platform is False: platform = get_string_or_none(obj.flavor["original_name"]) if platform is not None: @@ -1116,21 +1046,28 @@ def add_virtual_machine(self, obj): disk += int(size) annotation = None - if bool(self.skip_vm_comments) is False: - annotation = get_string_or_none(obj.id) + if self.settings.skip_vm_comments is False: + if self.settings.set_vm_name_to_uuid: + annotation = display_name + else: + annotation = get_string_or_none(obj.id) # assign vm_tenant_relation tenant_name = self.get_object_relation(name, "vm_tenant_relation") vm_data = { "name": name, - "cluster": {"name": cluster_name}, + "cluster": nb_cluster_object, "status": status, "memory": obj.flavor["ram"], "vcpus": obj.flavor["vcpus"], "disk": disk } + # Add adaption for change in NetBox 3.3.0 VM model + # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758 + if version.parse(self.inventory.netbox_api_version) >= version.parse("3.3.0"): + vm_data["site"] = {"name": site_name} if platform is not None: vm_data["platform"] = {"name": platform} if annotation is not None: @@ -1138,6 +1075,18 @@ def add_virtual_machine(self, obj): if tenant_name is not None: vm_data["tenant"] = {"name": tenant_name} + if self.settings.set_vm_name_to_uuid: + custom_field = self.add_update_custom_field({ + "name": "openstack_vm_name", + "label": "name", + "content_types": "virtualization.virtualmachine", + "type": str, + "description": 
f"Openstack '{self.name}' synced object attribute 'name'" + }) + vm_data["custom_fields"] = { + grab(custom_field, "data.name"): get_string_or_none(grab(obj, "name")) + } + vm_primary_ip4 = None vm_primary_ip6 = None vm_nic_dict = dict() @@ -1169,7 +1118,7 @@ def add_virtual_machine(self, obj): "description": full_name, "enabled": True, } - if ip_valid_to_add_to_netbox(ip_addr, self.permitted_subnets, full_name) is True: + if self.settings.permitted_subnets.permitted(ip_addr, interface_name=full_name) is True: vm_nic_dict[network] = vm_nic_data else: log.debug(f"Virtual machine '{name}' address '{ip_addr}' is not valid to add. Skipping") @@ -1192,7 +1141,7 @@ def update_basic_data(self): self.inventory.add_update_object(NBTag, data={ "name": self.source_tag, "description": f"Marks objects synced from Openstack '{self.name}' " - f"({self.auth_url}) to this NetBox Instance." + f"({self.settings.auth_url}) to this NetBox Instance." }) # update virtual site if present @@ -1209,11 +1158,10 @@ def update_basic_data(self): server_role_object = self.inventory.get_by_data(NBDeviceRole, data={"name": "Server"}) if server_role_object is not None: - server_role_object.update(data={ - "name": "Server", - "color": "9e9e9e", - "vm_role": True - }) + role_data = {"name": "Server", "vm_role": True} + if server_role_object.is_new is True: + role_data["color"] = "9e9e9e" + server_role_object.update(data=role_data) # EOF diff --git a/settings-example.ini b/settings-example.ini index 47d1e50..69bb699 100644 --- a/settings-example.ini +++ b/settings-example.ini @@ -431,8 +431,8 @@ project_domain = Default # this option should be set to 'true' to ensure a secure connection. #validate_tls_certs = false -# username and password to use to log into vCenter -username = vcenteruser +# username and password to use to log into OpenStack +username = openstackuser password = supersecret # IP networks eligible to be synced to NetBox. 
@@ -537,4 +537,8 @@ permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8 # strip domain part from VM name before syncing VM to NetBox #strip_vm_domain_name = False +# Set name in Netbox to the UUID instead of the name in Openstack +# This is useful as the names are not always unique +#set_vm_name_to_uuid = False + ;EOF From eb41682b7404c80222aa13f6bc3aa6de21799fb3 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Thu, 24 Aug 2023 13:10:58 +0200 Subject: [PATCH 08/12] Some bugfixes --- module/sources/openstack/connection.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 666fe82..39497c6 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -987,7 +987,7 @@ def add_virtual_machine(self, obj): if self.settings.set_vm_name_to_uuid: display_name = name - name = get_string_or_none(obj.uuid) + name = get_string_or_none(obj.id) log.debug(f"Parsing OpenStack VM: {name}") @@ -1047,9 +1047,7 @@ def add_virtual_machine(self, obj): annotation = None if self.settings.skip_vm_comments is False: - if self.settings.set_vm_name_to_uuid: - annotation = display_name - else: + if not self.settings.set_vm_name_to_uuid: annotation = get_string_or_none(obj.id) # assign vm_tenant_relation @@ -1080,7 +1078,7 @@ def add_virtual_machine(self, obj): "name": "openstack_vm_name", "label": "name", "content_types": "virtualization.virtualmachine", - "type": str, + "type": "text", "description": f"Openstack '{self.name}' synced object attribute 'name'" }) vm_data["custom_fields"] = { From b94744de68a01563f191d8c2396ac19316e06e7c Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Fri, 25 Aug 2023 13:54:43 +0200 Subject: [PATCH 09/12] First try to find prefix for own site --- module/sources/openstack/connection.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 39497c6..42aeb45 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -1098,7 +1098,9 @@ def add_virtual_machine(self, obj): ip_addr = address["addr"] prefix = None - matched_prefix = self.return_longest_matching_prefix_for_ip(ip_interface(ip_addr)) + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_interface(ip_addr), site_name) + if matched_prefix is None: + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_interface(ip_addr)) if matched_prefix is not None: prefix = matched_prefix.data["prefix"].prefixlen ip_addr = f"{ip_addr}/{prefix}" From 49937da70d8fe860b5cc2cf3d65fc94eae7559f5 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Fri, 25 Aug 2023 13:55:31 +0200 Subject: [PATCH 10/12] Add support for oVirt --- module/sources/__init__.py | 4 +- module/sources/ovirt/config.py | 359 ++++++++ module/sources/ovirt/connection.py | 1285 ++++++++++++++++++++++++++++ settings-example.ini | 128 +++ 4 files changed, 1774 insertions(+), 2 deletions(-) create mode 100644 module/sources/ovirt/config.py create mode 100644 module/sources/ovirt/connection.py diff --git a/module/sources/__init__.py b/module/sources/__init__.py index 4fc9842..0b94a81 100644 --- a/module/sources/__init__.py +++ b/module/sources/__init__.py @@ -10,6 +10,7 @@ # define all available sources here from module.sources.vmware.connection import VMWareHandler from module.sources.openstack.connection import OpenStackHandler +from module.sources.ovirt.connection import OVirtHandler from module.sources.check_redfish.import_inventory import CheckRedfish from module.common.logging import get_logger @@ -19,8 +20,7 @@ from module.config import source_config_section_name # list of valid sources -valid_sources = [VMWareHandler, OpenStackHandler, CheckRedfish] - +valid_sources = [VMWareHandler, OpenStackHandler, OVirtHandler, CheckRedfish] def 
validate_source(source_class_object=None, state="pre"): """ diff --git a/module/sources/ovirt/config.py b/module/sources/ovirt/config.py new file mode 100644 index 0000000..0481bb0 --- /dev/null +++ b/module/sources/ovirt/config.py @@ -0,0 +1,359 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 - 2023 Ricardo Bartels. All rights reserved. +# +# netbox-sync.py +# +# This work is licensed under the terms of the MIT license. +# For a copy, see file LICENSE.txt included in this +# repository or visit: . + +import re +from ipaddress import ip_address + +from module.common.misc import quoted_split +from module.config import source_config_section_name +from module.config.base import ConfigBase +from module.config.option import ConfigOption +from module.config.group import ConfigOptionGroup +from module.sources.common.conifg import * +from module.sources.common.permitted_subnets import PermittedSubnets +from module.common.logging import get_logger +from module.common.support import normalize_mac_address + +log = get_logger() + + +class OVirtConfig(ConfigBase): + + section_name = source_config_section_name + source_name = None + source_name_example = "my-ovirt-example" + + def __init__(self): + self.options = [ + ConfigOption(**config_option_enabled_definition), + + ConfigOption(**{**config_option_type_definition, "config_example": "ovirt"}), + + ConfigOption("url", + str, + description="host name / IP address of the oVirt API", + config_example="https://engine40.example.com/ovirt-engine/api", + mandatory=True), + + ConfigOption("username", + str, + description="username to use to log into oVirt", + config_example="ovirtuser", + mandatory=True), + + ConfigOption("password", + str, + description="password to use to log into oVirt", + config_example="supersecret", + sensitive=True, + mandatory=True), + + ConfigOption("ca_file", + str, + description="path to the CA file for oVirt", + config_example="ca.pem"), + + ConfigOption("validate_tls_certs", + bool, + 
description="""Enforces TLS certificate validation. + If oVirt uses a valid TLS certificate then this option should be set + to 'true' to ensure a secure connection.""", + default_value=False), + + ConfigOption(**config_option_permitted_subnets_definition), + + ConfigOptionGroup(title="filter", + description="""filters can be used to include/exclude certain objects from importing + into NetBox. Include filters are checked first and exclude filters after. + An object name has to pass both filters to be synced to NetBox. + If a filter is unset it will be ignored. Filters are all treated as regex expressions! + If more then one expression should match, a '|' needs to be used + """, + config_example="""Example: (exclude all VMs with "replica" in their name + and all VMs starting with "backup"): vm_exclude_filter = .*replica.*|^backup.*""", + options=[ + ConfigOption("cluster_exclude_filter", + str, + description="""If a cluster is excluded from sync then ALL VMs and HOSTS + inside the cluster will be ignored! a cluster can be specified + as "Cluster-name" or "Datacenter-name/Cluster-name" if + multiple clusters have the same name"""), + ConfigOption("cluster_include_filter", str), + ConfigOption("host_exclude_filter", + str, + description="""This will only include/exclude the host, + not the VM if Host is part of a multi host cluster"""), + ConfigOption("host_include_filter", str), + ConfigOption("vm_exclude_filter", + str, description="simply include/exclude VMs"), + ConfigOption("vm_include_filter", str) + ]), + ConfigOptionGroup(title="relations", + options=[ + ConfigOption("cluster_site_relation", + str, + description="""\ + This option defines which vCenter cluster is part of a NetBox site. + This is done with a comma separated key = value list. + key: defines the cluster name as regex + value: defines the NetBox site name (use quotes if name contains commas) + This is a quite important config setting as IP addresses, prefixes, VLANs + and VRFs are site dependent. 
In order to assign the correct prefix to an IP + address it is important to pick the correct site. + A VM always depends on the cluster site relation + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name + """, + config_example="Cluster_NYC = New York, Cluster_FFM.* = Frankfurt, Datacenter_TOKIO/.* = Tokio"), + ConfigOption("host_site_relation", + str, + description="""Same as cluster site but on host level. + If unset it will fall back to cluster_site_relation""", + config_example="nyc02.* = New York, ffm01.* = Frankfurt"), + ConfigOption("cluster_tenant_relation", + str, + description="""\ + This option defines which cluster/host/VM belongs to which tenant. + This is done with a comma separated key = value list. + key: defines a hosts/VM name as regex + value: defines the NetBox tenant name (use quotes if name contains commas) + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name + """, + config_example="Cluster_NYC.* = Customer A"), + ConfigOption("host_tenant_relation", str, config_example="esxi300.* = Infrastructure"), + ConfigOption("vm_tenant_relation", str, config_example="grafana.* = Infrastructure"), + ConfigOption("vm_platform_relation", + str, + description="""\ + This option defines custom platforms if the VMWare created platforms are not suitable. + Pretty much a mapping of VMWare platform name to your own platform name. + This is done with a comma separated key = value list. + key: defines a VMWare returned platform name + value: defines the desired NetBox platform name""", + config_example="centos-7.* = centos7, microsoft-windows-server-2016.* = Windows2016"), + ConfigOption("host_role_relation", + str, + description="""\ + Define the NetBox device role used for hosts. The default is + set to "Server". This is done with a comma separated key = value list. 
+ key: defines host(s) name as regex + value: defines the NetBox role name (use quotes if name contains commas) + """, + default_value=".* = Server"), + ConfigOption("vm_role_relation", + str, + description="""\ + Define the NetBox device role used for VMs. This is done with a + comma separated key = value list, same as 'host_role_relation'. + key: defines VM(s) name as regex + value: defines the NetBox role name (use quotes if name contains commas) + """, + config_example=".* = Server"), + ConfigOption("cluster_tag_relation", + str, + description="""\ + Define NetBox tags which are assigned to a cluster, host or VM. This is + done with a comma separated key = value list. + key: defines a hosts/VM name as regex + value: defines the NetBox tag (use quotes if name contains commas) + a cluster can be specified as "Cluster-name" or + "Datacenter-name/Cluster-name" if multiple clusters have the same name""", + config_example="Cluster_NYC.* = Infrastructure"), + ConfigOption("host_tag_relation", str, config_example="esxi300.* = Infrastructure"), + ConfigOption("vm_tag_relation", str, config_example="grafana.* = Infrastructure") + ]), + ConfigOption("dns_name_lookup", + bool, + description="""Perform a reverse lookup for all collected IP addresses. 
+ If a dns name was found it will be added to the IP address object in NetBox + """, + default_value=True), + ConfigOption("custom_dns_servers", + str, + description="use custom DNS server to do the reverse lookups", + config_example="192.168.1.11, 192.168.1.12"), + ConfigOption("set_primary_ip", + str, + description="""\ + define how the primary IPs should be set + possible values: + + always: will remove primary IP from the object where this address is + currently set as primary and moves it to new object + + when-undefined: + only sets primary IP if undefined, will cause ERRORs if same IP is + assigned more then once to different hosts and IP is set as the + objects primary IP + + never: don't set any primary IPs, will cause the same ERRORs + as "when-undefined" + """, + default_value="when-undefined"), + ConfigOption("skip_vm_comments", + bool, + description="do not set notes to the UUID or name of the VM", + default_value=False), + ConfigOption("skip_vm_platform", + bool, + description="do not sync flavors from a VM in Openstack to the comments field on a VM in netbox", + default_value=False), + ConfigOption("strip_host_domain_name", + bool, + description="strip domain part from host name before syncing device to NetBox", + default_value=False), + ConfigOption("strip_vm_domain_name", + bool, + description="strip domain part from VM name before syncing VM to NetBox", + default_value=False), + ConfigOptionGroup(title="custom object attributes", + description="""\ + add arbitrary host/vm object attributes as custom fields to NetBox. + multiple attributes can be defined comma separated. 
+ to get a list of available attributes use '-l DEBUG3' as cli param (CAREFUL: output might be long) + and here 'https://gist.github.com/bb-Ricardo/538768487bdac4efafabe56e005cb4ef' can be seen how to + access these attributes + """, + options=[ + ConfigOption("host_custom_object_attributes", + str, + config_example="uuid"), + ConfigOption("vm_custom_object_attributes", + str, + config_example="uuid") + ]), + ConfigOption("set_source_name_as_cluster_group", + bool, + description="""this will set the sources name as cluster group name instead of the datacenter. + This works if the oVirt CP has ONLY ONE datacenter configured. + Otherwise it will rename all datacenters to the source name!""", + default_value=False), + ConfigOption("set_vm_name_to_uuid", + bool, + description="Set the name in Netbox to the VM UUID instead of name", + default_value=False), + + # removed settings + ConfigOption("netbox_host_device_role", + str, + deprecation_message="You need to switch to 'host_role_relation'.", + removed=True), + ConfigOption("netbox_vm_device_role", + str, + deprecation_message="You need to switch to 'vm_role_relation'.", + removed=True), + ConfigOption("sync_tags", + bool, + deprecation_message="You need to switch to 'host_tag_source', " + + "'vm_tag_source' or 'cluster_tag_source'", + removed=True), + ConfigOption("sync_parent_tags", + bool, + deprecation_message="You need to switch to 'host_tag_source', " + + "'vm_tag_source' or 'cluster_tag_source'", + removed=True) + ] + + super().__init__() + + def validate_options(self): + + for option in self.options: + + if option.value is None: + continue + + if "filter" in option.key: + + re_compiled = None + try: + re_compiled = re.compile(option.value) + except Exception as e: + log.error(f"Problem parsing regular expression for '{self.source_name}.{option.key}': {e}") + self.set_validation_failed() + + option.set_value(re_compiled) + + continue + + if "relation" in option.key: + + relation_data = list() + + relation_type 
= option.key.split("_")[1] + + for relation in quoted_split(option.value): + + object_name = relation.split("=")[0].strip(' "') + relation_name = relation.split("=")[1].strip(' "') + + if len(object_name) == 0 or len(relation_name) == 0: + log.error(f"Config option '{relation}' malformed got '{object_name}' for " + f"object name and '{relation_name}' for {relation_type} name.") + self.set_validation_failed() + continue + + try: + re_compiled = re.compile(object_name) + except Exception as e: + log.error(f"Problem parsing regular expression '{object_name}' for '{relation}': {e}") + self.set_validation_failed() + continue + + relation_data.append({ + "object_regex": re_compiled, + "assigned_name": relation_name + }) + + option.set_value(relation_data) + + continue + + if "custom_object_attributes" in option.key: + + option.set_value(quoted_split(option.value)) + + continue + + if option.key == "set_primary_ip": + if option.value not in ["always", "when-undefined", "never"]: + log.error(f"Primary IP option '{option.key}' value '{option.value}' invalid.") + self.set_validation_failed() + + if option.key == "custom_dns_servers": + + dns_name_lookup = self.get_option_by_name("dns_name_lookup") + + if not isinstance(dns_name_lookup, ConfigOption) or dns_name_lookup.value is False: + continue + + custom_dns_servers = quoted_split(option.value) + + tested_custom_dns_servers = list() + for custom_dns_server in custom_dns_servers: + try: + tested_custom_dns_servers.append(str(ip_address(custom_dns_server))) + except ValueError: + log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' " + f"does not appear to be an IP address.") + self.set_validation_failed() + + option.set_value(tested_custom_dns_servers) + + continue + + permitted_subnets_option = self.get_option_by_name("permitted_subnets") + + if permitted_subnets_option is not None: + permitted_subnets = PermittedSubnets(permitted_subnets_option.value) + if permitted_subnets.validation_failed is True: 
+ self.set_validation_failed() + + permitted_subnets_option.set_value(permitted_subnets) diff --git a/module/sources/ovirt/connection.py b/module/sources/ovirt/connection.py new file mode 100644 index 0000000..f1a584d --- /dev/null +++ b/module/sources/ovirt/connection.py @@ -0,0 +1,1285 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 - 2021 Ricardo Bartels. All rights reserved. +# +# netbox-sync.py +# +# This work is licensed under the terms of the MIT license. +# For a copy, see file LICENSE.txt included in this +# repository or visit: . + +import pprint +from ipaddress import ip_address, ip_interface + +from packaging import version +import ovirtsdk4 as sdk +import ovirtsdk4.types as types + +from module.sources.common.source_base import SourceBase +from module.sources.ovirt.config import OVirtConfig +from module.common.logging import get_logger, DEBUG3 +from module.common.misc import grab, dump, get_string_or_none +from module.common.support import normalize_mac_address +from module.netbox.inventory import NetBoxInventory +from module.netbox import * + +log = get_logger() + + +# noinspection PyTypeChecker +class OVirtHandler(SourceBase): + """ + Source class to import data from a oVirt instance and add/update NetBox objects based on gathered information + """ + + dependent_netbox_objects = [ + NBTag, + NBManufacturer, + NBDeviceType, + NBPlatform, + NBClusterType, + NBClusterGroup, + NBDeviceRole, + NBSite, + NBCluster, + NBDevice, + NBVM, + NBVMInterface, + NBInterface, + NBIPAddress, + NBPrefix, + NBTenant, + NBVRF, + NBVLAN, + NBCustomField + ] + + source_type = "ovirt" + + # internal vars + session = None + tag_session = None + + site_name = None + + def __init__(self, name=None): + + if name is None: + raise ValueError(f"Invalid value for attribute 'name': '{name}'.") + + self.inventory = NetBoxInventory() + self.name = name + + # parse settings + settings_handler = OVirtConfig() + settings_handler.source_name = self.name + self.settings = 
settings_handler.parse() + + self.set_source_tag() + self.site_name = f"oVirt: {name}" + + if self.settings.enabled is False: + log.info(f"Source '{name}' is currently disabled. Skipping") + return + + self.create_ovirt_session() + + if self.session is None: + log.info(f"Source '{name}' is currently unavailable. Skipping") + return + + self.init_successful = True + self.permitted_clusters = dict() + self.processed_host_names = dict() + self.processed_vm_names = dict() + self.processed_vm_uuid = list() + self.object_cache = dict() + self.parsing_vms_the_first_time = True + self.disk_cache = dict() + + def create_ovirt_session(self): + """ + Initialize session with oVirt + + Returns + ------- + bool: if initialization was successful or not + """ + + if self.session is not None: + return True + + log.debug(f"Starting oVirt connection to '{self.settings.url}'") + + try: + self.session = sdk.Connection( + url=self.settings.url, + username=self.settings.username, + password=self.settings.password, + ca_file=self.settings.ca_file, + insecure=(not self.settings.validate_tls_certs), + debug=False, + log=log, + ) + except Exception as e: + log.error(f"Unable to connect to oVirt instance '{self.settings.url}'. {e.msg}") + return False + + log.info(f"Successfully connected to oVirt '{self.settings.url}'") + + return True + + def apply(self): + """ + Main source handler method. This method is called for each source from "main" program + to retrieve data from it source and apply it to the NetBox inventory. + + Every update of new/existing objects fot this source has to happen here. + """ + + log.info(f"Query data from oVirt: '{self.settings.url}'") + + """ + Mapping of object type keywords to view types and handlers + + iterate over all VMs twice. + + To handle VMs with the same name in a cluster we first + iterate over all VMs and look only at the active ones + and sync these first. + Then we iterate a second time to catch the rest. 
+ + This has been implemented to support migration scenarios + where you create the same machines with a different setup + like a new version or something. This way NetBox will be + updated primarily with the actual active VM data. + """ + + system_service = self.session.system_service() + + disks = system_service.disks_service().list() + for disk in disks: + self.disk_cache[disk.id] = disk + + datacenters = system_service.data_centers_service().list() + for datacenter in datacenters: + self.add_datacenter(datacenter) + + clusters = system_service.clusters_service().list() + for cluster in clusters: + self.add_cluster(cluster) + + hosts = system_service.hosts_service().list(follow="nics") + for host in hosts: + self.add_host(host) + + vms = system_service.vms_service().list(follow="diskattachments,reporteddevices") + for vm in vms: + self.add_virtual_machine(vm) + + self.update_basic_data() + + @staticmethod + def passes_filter(name, include_filter, exclude_filter): + """ + checks if object name passes a defined object filter. + + Parameters + ---------- + name: str + name of the object to check + include_filter: regex object + regex object of include filter + exclude_filter: regex object + regex object of exclude filter + + Returns + ------- + bool: True if all filter passed, otherwise False + """ + + # first includes + if include_filter is not None and not include_filter.match(name): + log.debug(f"Object '{name}' did not match include filter '{include_filter.pattern}'. Skipping") + return False + + # second excludes + if exclude_filter is not None and exclude_filter.match(name): + log.debug(f"Object '{name}' matched exclude filter '{exclude_filter.pattern}'. 
Skipping") + return False + + return True + + def get_site_name(self, object_type, object_name, cluster_name=""): + """ + Return a site name for a NBCluster or NBDevice depending on config options + host_site_relation and cluster_site_relation + + Parameters + ---------- + object_type: (NBCluster, NBDevice) + object type to check site relation for + object_name: str + object name to check site relation for + cluster_name: str + cluster name of NBDevice to check for site name + + Returns + ------- + str: site name if a relation was found + """ + + if object_type not in [NBCluster, NBDevice]: + raise ValueError(f"Object must be a '{NBCluster.name}' or '{NBDevice.name}'.") + + log.debug2(f"Trying to find site name for {object_type.name} '{object_name}'") + + # check if site was provided in config + relation_name = "host_site_relation" if object_type == NBDevice else "cluster_site_relation" + + site_name = self.get_object_relation(object_name, relation_name) + + if object_type == NBDevice and site_name is None: + site_name = self.get_site_name(NBCluster, cluster_name) + if site_name is not None: + log.debug2(f"Found a matching cluster site for {object_name}, using site '{site_name}'") + + # set default site name + if site_name is None: + site_name = self.site_name + log.debug(f"No site relation for '{object_name}' found, using default site '{site_name}'") + + return site_name + + def get_object_based_on_macs(self, object_type, mac_list=None): + """ + Try to find a NetBox object based on list of MAC addresses. + + Iterate over all interfaces of this object type and compare MAC address with list of desired MAC + addresses. If match was found store related machine object and count every correct match. + + If exactly one machine with matching interfaces was found then this one will be returned. + + If two or more machines with matching MACs are found compare the two machines with + the highest amount of matching interfaces. 
If the ration of matching interfaces + exceeds 2.0 then the top matching machine is chosen as desired object. + + If the ration is below 2.0 then None will be returned. The probability is too low that + this one is the correct one. + + None will also be returned if no machine was found at all. + + Parameters + ---------- + object_type: (NBDevice, NBVM) + type of NetBox device to find in inventory + mac_list: list + list of MAC addresses to compare against NetBox interface objects + + Returns + ------- + (NBDevice, NBVM, None): object instance of found device, otherwise None + """ + + object_to_return = None + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if mac_list is None or not isinstance(mac_list, list) or len(mac_list) == 0: + return + + interface_typ = NBInterface if object_type == NBDevice else NBVMInterface + + objects_with_matching_macs = dict() + matching_object = None + + for interface in self.inventory.get_all_items(interface_typ): + + if grab(interface, "data.mac_address") in mac_list: + + matching_object = grab(interface, f"data.{interface.secondary_key}") + if not isinstance(matching_object, (NBDevice, NBVM)): + continue + + log.debug2("Found matching MAC '%s' on %s '%s'" % + (grab(interface, "data.mac_address"), object_type.name, + matching_object.get_display_name(including_second_key=True))) + + if objects_with_matching_macs.get(matching_object) is None: + objects_with_matching_macs[matching_object] = 1 + else: + objects_with_matching_macs[matching_object] += 1 + + # try to find object based on amount of matching MAC addresses + num_devices_witch_matching_macs = len(objects_with_matching_macs.keys()) + + if num_devices_witch_matching_macs == 1 and isinstance(matching_object, (NBDevice, NBVM)): + + log.debug2("Found one %s '%s' based on MAC addresses and using it" % + (object_type.name, matching_object.get_display_name(including_second_key=True))) + + object_to_return = 
list(objects_with_matching_macs.keys())[0] + + elif num_devices_witch_matching_macs > 1: + + log.debug2(f"Found {num_devices_witch_matching_macs} {object_type.name}s with matching MAC addresses") + + # now select the two top matches + first_choice, second_choice = \ + sorted(objects_with_matching_macs, key=objects_with_matching_macs.get, reverse=True)[0:2] + + first_choice_matches = objects_with_matching_macs.get(first_choice) + second_choice_matches = objects_with_matching_macs.get(second_choice) + + log.debug2(f"The top candidate {first_choice.get_display_name()} with {first_choice_matches} matches") + log.debug2(f"The second candidate {second_choice.get_display_name()} with {second_choice_matches} matches") + + # get ratio between + matching_ration = first_choice_matches / second_choice_matches + + # only pick the first one if the ration exceeds 2 + if matching_ration >= 2.0: + log.debug2(f"The matching ratio of {matching_ration} is high enough " + f"to select {first_choice.get_display_name()} as desired {object_type.name}") + object_to_return = first_choice + else: + log.debug2("Both candidates have a similar amount of " + "matching interface MAC addresses. Using NONE of them!") + + return object_to_return + + def get_object_based_on_primary_ip(self, object_type, primary_ip4=None, primary_ip6=None): + """ + Try to find a NBDevice or NBVM based on the primary IP address. If an exact + match was found the device/vm object will be returned immediately without + checking of the other primary IP address (if defined). 
+ + Parameters + ---------- + object_type: (NBDevice, NBVM) + object type to look for + primary_ip4: str + primary IPv4 address of object to find + primary_ip6: str + primary IPv6 address of object to find + + Returns + ------- + + """ + + def _matches_device_primary_ip(device_primary_ip, ip_needle): + + ip = None + if device_primary_ip is not None and ip_needle is not None: + if isinstance(device_primary_ip, dict): + ip = grab(device_primary_ip, "address") + + elif isinstance(device_primary_ip, int): + ip = self.inventory.get_by_id(NBIPAddress, nb_id=device_primary_ip) + ip = grab(ip, "data.address") + + if ip is not None and ip.split("/")[0] == ip_needle: + return True + + return False + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if primary_ip4 is None and primary_ip6 is None: + return + + if primary_ip4 is not None: + primary_ip4 = str(primary_ip4).split("/")[0] + + if primary_ip6 is not None: + primary_ip6 = str(primary_ip6).split("/")[0] + + for device in self.inventory.get_all_items(object_type): + + if _matches_device_primary_ip(grab(device, "data.primary_ip4"), primary_ip4) is True: + log.debug2(f"Found existing host '{device.get_display_name()}' " + f"based on the primary IPv4 '{primary_ip4}'") + return device + + if _matches_device_primary_ip(grab(device, "data.primary_ip6"), primary_ip6) is True: + log.debug2(f"Found existing host '{device.get_display_name()}' " + f"based on the primary IPv6 '{primary_ip6}'") + return device + + def get_object_relation(self, name, relation, fallback=None): + """ + + Parameters + ---------- + name: str + name of the object to find a relation for + relation: str + name of the config variable relation (i.e: vm_tag_relation) + fallback: str + fallback string if no relation matched + + Returns + ------- + data: str, list, None + string of matching relation or list of matching tags + """ + + resolved_list = list() + for single_relation in 
grab(self.settings, relation, fallback=list()): + object_regex = single_relation.get("object_regex") + match_found = False + if object_regex.match(name): + resolved_name = single_relation.get("assigned_name") + log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) for {name}") + resolved_list.append(resolved_name) + match_found = True + + # special cluster condition + if match_found is False and grab(f"{relation}".split("_"), "0") == "cluster": + + stripped_name = "/".join(name.split("/")[1:]) + if object_regex.match(stripped_name): + + resolved_name = single_relation.get("assigned_name") + log.debug2(f"Found a matching {relation} '{resolved_name}' ({object_regex.pattern}) " + f"for {stripped_name}") + resolved_list.append(resolved_name) + + if grab(f"{relation}".split("_"), "1") == "tag": + return resolved_list + + else: + resolved_name = fallback + if len(resolved_list) >= 1: + resolved_name = resolved_list[0] + if len(resolved_list) > 1: + log.debug(f"Found {len(resolved_list)} matches for {name} in {relation}." + f" Using first on: {resolved_name}") + + return resolved_name + + def add_device_vm_to_inventory(self, object_type, object_data, pnic_data=None, vnic_data=None, + nic_ips=None, p_ipv4=None, p_ipv6=None, ovirt_object=None): + """ + Add/update device/VM object in inventory based on gathered data. + + Try to find object first based on the object data, interface MAC addresses and primary IPs. + 1. try to find by name and cluster/site + 2. try to find by mac addresses interfaces + 3. try to find by primary IP + + IP addresses for each interface are added here as well. First they will be checked and added + if all checks pass. For each IP address a matching IP prefix will be searched for. First we + look for longest matching IP Prefix in the same site. If this failed we try to find the longest + matching global IP Prefix. + + If a IP Prefix was found then we try to get the VRF and VLAN for this prefix. 
Now we compare + if interface VLAN and prefix VLAN match up and warn if they don't. Then we try to add data to + the IP address if not already set: + + add prefix VRF if VRF for this IP is undefined + add tenant if tenant for this IP is undefined + 1. try prefix tenant + 2. if prefix tenant is undefined try VLAN tenant + + And we also set primary IP4/6 for this object depending on the "set_primary_ip" setting. + + If a IP address is set as primary IP for another device then using this IP on another + device will be rejected by NetBox. + + Setting "always": + check all NBDevice and NBVM objects if this IP address is set as primary IP to any + other object then this one. If we found another object, then we unset the primary_ip* + for the found object and assign it to this object. + + This setting will also reset the primary IP if it has been changed in NetBox + + Setting "when-undefined": + Will set the primary IP for this object if primary_ip4/6 is undefined. Will cause a + NetBox error if IP has been assigned to a different object as well + + Setting "never": + Well, the attribute primary_ip4/6 will never be touched/changed. 
+ + Parameters + ---------- + object_type: (NBDevice, NBVM) + NetBoxObject sub class of object to add + object_data: dict + data of object to add/update + pnic_data: dict + data of physical interfaces of this object, interface name as key + vnic_data: dict + data of virtual interfaces of this object, interface name as key + nic_ips: dict + dict of ips per interface of this object, interface name as key + p_ipv4: str + primary IPv4 as string including netmask/prefix + p_ipv6: str + primary IPv6 as string including netmask/prefix + + """ + + if object_type not in [NBDevice, NBVM]: + raise ValueError(f"Object must be a '{NBVM.name}' or '{NBDevice.name}'.") + + if log.level == DEBUG3: + + log.debug3("function: add_device_vm_to_inventory") + log.debug3(f"Object type {object_type}") + pprint.pprint(object_data) + pprint.pprint(pnic_data) + pprint.pprint(vnic_data) + pprint.pprint(nic_ips) + pprint.pprint(p_ipv4) + pprint.pprint(p_ipv6) + + # check existing Devices for matches + log.debug2(f"Trying to find a {object_type.name} based on the collected name, cluster, IP and MAC addresses") + + device_vm_object = self.inventory.get_by_data(object_type, data=object_data) + + if device_vm_object is not None: + log.debug2("Found a exact matching %s object: %s" % + (object_type.name, device_vm_object.get_display_name(including_second_key=True))) + + # keep searching if no exact match was found + else: + + log.debug2(f"No exact match found. 
Trying to find {object_type.name} based on MAC addresses") + + # on VMs vnic data is used, on physical devices pnic data is used + mac_source_data = vnic_data if object_type == NBVM else pnic_data + + nic_macs = [x.get("mac_address") for x in mac_source_data.values() if x.get("mac_address") is not None] + + device_vm_object = self.get_object_based_on_macs(object_type, nic_macs) + + if device_vm_object is not None: + log.debug2("Found a matching %s object: %s" % + (object_type.name, device_vm_object.get_display_name(including_second_key=True))) + + # keep looking for devices with the same primary IP + else: + + log.debug2(f"No match found. Trying to find {object_type.name} based on primary IP addresses") + + device_vm_object = self.get_object_based_on_primary_ip(object_type, p_ipv4, p_ipv6) + + if device_vm_object is None: + object_name = object_data.get(object_type.primary_key) + log.debug(f"No existing {object_type.name} object for {object_name}. Creating a new {object_type.name}.") + device_vm_object = self.inventory.add_object(object_type, data=object_data, source=self) + else: + device_vm_object.update(data=object_data, source=self) + + # add object to cache + self.add_object_to_cache(ovirt_object, device_vm_object) + + # update role according to config settings + object_name = object_data.get(object_type.primary_key) + role_name = self.get_object_relation(object_name, + "host_role_relation" if object_type == NBDevice else "vm_role_relation") + + if object_type == NBDevice: + if role_name is None: + role_name = "Server" + device_vm_object.update(data={"device_role": {"name": role_name}}) + if object_type == NBVM and role_name is not None: + device_vm_object.update(data={"role": {"name": role_name}}) + + # compile all nic data into one dictionary + if object_type == NBVM: + nic_data = vnic_data + else: + nic_data = {**pnic_data, **vnic_data} + + # map interfaces of existing object with discovered interfaces + nic_object_dict = 
self.map_object_interfaces_to_current_interfaces(device_vm_object, nic_data) + + if object_data.get("status", "") == "active" and (nic_ips is None or len(nic_ips.keys()) == 0): + log.debug(f"No IP addresses for '{object_name}' found!") + + primary_ipv4_object = None + primary_ipv6_object = None + + if p_ipv4 is not None: + try: + primary_ipv4_object = ip_interface(p_ipv4) + except ValueError: + log.error(f"Primary IPv4 ({p_ipv4}) does not appear to be a valid IP address (needs included suffix).") + + if p_ipv6 is not None: + try: + primary_ipv6_object = ip_interface(p_ipv6) + except ValueError: + log.error(f"Primary IPv6 ({p_ipv6}) does not appear to be a valid IP address (needs included suffix).") + + for int_name, int_data in nic_data.items(): + + # add/update interface with retrieved data + nic_object, ip_address_objects = self.add_update_interface(nic_object_dict.get(int_name), device_vm_object, + int_data, nic_ips.get(int_name, list())) + + # add all interface IPs + for ip_object in ip_address_objects: + + if ip_object is None: + continue + + ip_interface_object = ip_interface(grab(ip_object, "data.address")) + + # continue if address is not a primary IP + if ip_interface_object not in [primary_ipv4_object, primary_ipv6_object]: + continue + + # set/update/remove primary IP addresses + set_this_primary_ip = False + ip_version = ip_interface_object.ip.version + if self.settings.set_primary_ip == "always": + + for object_type in [NBDevice, NBVM]: + + # new IPs don't need to be removed from other devices/VMs + if ip_object.is_new is True: + break + + for devices_vms in self.inventory.get_all_items(object_type): + + # device has no primary IP of this version + this_primary_ip = grab(devices_vms, f"data.primary_ip{ip_version}") + + # we found this exact object + if devices_vms == device_vm_object: + continue + + # device has the same object assigned + if this_primary_ip == ip_object: + devices_vms.unset_attribute(f"primary_ip{ip_version}") + + set_this_primary_ip = 
True + + elif self.settings.set_primary_ip != "never" and \ + grab(device_vm_object, f"data.primary_ip{ip_version}") is None: + set_this_primary_ip = True + + if set_this_primary_ip is True: + + log.debug(f"Setting IP '{grab(ip_object, 'data.address')}' as primary IPv{ip_version} for " + f"'{device_vm_object.get_display_name()}'") + device_vm_object.update(data={f"primary_ip{ip_version}": ip_object}) + + return + + def add_object_to_cache(self, vm_object, netbox_object): + + if None in [vm_object, netbox_object]: + return + + # noinspection PyBroadException + try: + vm_class_name = vm_object.__class__.__name__ + # noinspection PyProtectedMember + vm_object_id = vm_object.id + except Exception: + return + + if self.object_cache.get(vm_class_name) is None: + self.object_cache[vm_class_name] = dict() + + self.object_cache[vm_class_name][vm_object_id] = netbox_object + + def get_object_from_cache(self, vm_object): + + if vm_object is None: + return + + # noinspection PyBroadException + try: + vm_class_name = vm_object.__class__.__name__ + # noinspection PyProtectedMember + vm_object_id = vm_object.id + except Exception: + return + + if self.object_cache.get(vm_class_name) is None: + return + + return self.object_cache[vm_class_name].get(vm_object_id) + + def add_datacenter(self, obj): + """ + Add a oVirt datacenter as a NBClusterGroup to NetBox + + Parameters + ---------- + obj: types.DataCenter + datacenter object + + """ + if self.settings.set_source_name_as_cluster_group is True: + name = self.name + else: + name = get_string_or_none(grab(obj, "name")) + + if name is None: + return + + log.debug(f"Parsing oVirt datacenter: {name}") + + object_data = {"name": name} + + if self.settings.set_source_name_as_cluster_group is True: + label = "Datacenter Name" + custom_field = self.add_update_custom_field({ + "name": f"ovirt_{label}", + "label": label, + "content_types": ["virtualization.clustergroup"], + "type": "text", + "description": f"oVirt '{self.name}' synced custom 
attribute '{label}'" + }) + + object_data["custom_fields"] = { + grab(custom_field, "data.name"): get_string_or_none(grab(obj, "name")) + } + + self.add_object_to_cache(obj, self.inventory.add_update_object(NBClusterGroup, data=object_data, source=self)) + + def add_cluster(self, obj): + """ + Add a oVirt Cluster as a NBCluster to NetBox. Cluster name is checked against + cluster_include_filter and cluster_exclude_filter config setting. Also adds + cluster and site_name to "self.permitted_clusters" so hosts and VMs can be + checked if they are part of a permitted cluster. + + Parameters + ---------- + obj: types.Cluster + cluster to add + """ + + name = get_string_or_none(grab(obj, "name")) + if self.settings.set_source_name_as_cluster_group is True: + group = self.inventory.get_by_data(NBClusterGroup, data={"name": self.name}) + else: + group = self.get_object_from_cache(obj.data_center) + + if name is None or group is None: + return + + group_name = grab(group, "data.name") + full_cluster_name = f"{group_name}/{name}" + + log.debug(f"Parsing oVirt Cluster: {full_cluster_name}") + + # check for full name and then for cluster name only + if self.passes_filter(full_cluster_name, + self.settings.cluster_include_filter, + self.settings.cluster_exclude_filter) is False \ + or self.passes_filter(name, + self.settings.cluster_include_filter, + self.settings.cluster_exclude_filter) is False: + return + + site_name = self.get_site_name(NBCluster, full_cluster_name) + + data = { + "name": name, + "type": {"name": "oVirt"}, + "group": group, + "site": {"name": site_name} + } + + tenant_name = self.get_object_relation(full_cluster_name, "cluster_tenant_relation") + if tenant_name is not None: + data["tenant"] = {"name": tenant_name} + + cluster_tags = self.get_object_relation(full_cluster_name, "cluster_tag_relation") + if len(cluster_tags) > 0: + data["tags"] = cluster_tags + + # try to find cluster including cluster group + log.debug2("Trying to find a matching existing 
cluster") + cluster_object = None + fallback_cluster_object = None + for cluster_candidate in self.inventory.get_all_items(NBCluster): + if grab(cluster_candidate, "data.name") != name: + continue + + # try to find a cluster with matching site + if cluster_candidate.get_site_name() == site_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'site' are matching") + break + + if grab(cluster_candidate, "data.group") is not None and \ + grab(cluster_candidate, "data.group.data.name") == group_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'cluster group' are matching") + break + + if grab(cluster_candidate, "data.tenant") is not None and \ + tenant_name is not None and \ + grab(cluster_candidate, "data.tenant.data.name") == tenant_name: + cluster_object = cluster_candidate + log.debug2("Found an existing cluster where 'name' and 'tenant' are matching") + break + + # if only the name matches and there are multiple cluster with the same name we choose the first + # cluster returned from netbox. This needs to be done to not ignore possible matches in one of + # the next iterations + if fallback_cluster_object is None: + fallback_cluster_object = cluster_candidate + + if cluster_object is None and fallback_cluster_object is not None: + log.debug2(f"Found an existing cluster where 'name' " + f"matches (NetBox id: {fallback_cluster_object.get_nb_reference()})") + cluster_object = fallback_cluster_object + + if cluster_object is not None: + cluster_object.update(data=data, source=self) + else: + cluster_object = self.inventory.add_update_object(NBCluster, data=data, source=self) + + self.add_object_to_cache(obj, cluster_object) + + def add_host(self, obj): + """ + Parse a oVirt host to NetBox once all data is gathered. 
+ + First host is filtered: + host has a cluster and is it permitted + was host with same name and site already parsed + does the host pass the host_include_filter and host_exclude_filter + + Then all necessary host data will be collected. + + Primary IPv4/6 will be determined by 'address' value + Other IP's are listed in the 'network attachments' + + Parameters + ---------- + obj: types.Host + host object to parse + """ + + name = get_string_or_none(grab(obj, "name")) + + if name is not None and self.settings.strip_host_domain_name is True: + name = name.split(".")[0] + + # parse data + log.debug(f"Parsing oVirt host: {name}") + + # + # Filtering + # + + # manage site and cluster + cluster_object = obj.cluster + + if cluster_object is None: + log.error(f"Requesting cluster for host '{name}' failed. Skipping.") + return + + if log.level == DEBUG3: + try: + log.info("Cluster data") + dump(cluster_object) + except Exception as e: + log.error(e) + + # get cluster object + nb_cluster_object = self.get_object_from_cache(cluster_object) + + if nb_cluster_object is None: + log.debug(f"Host '{name}' is not part of a permitted cluster. Skipping") + return + + cluster_name = get_string_or_none(grab(nb_cluster_object, "data.name")) + + # get a site for this host + if self.settings.set_source_name_as_cluster_group is True: + group = self.inventory.get_by_data(NBClusterGroup, data={"name": self.name}) + else: + group = self.get_object_from_cache(obj.cluster.data_center) + group_name = grab(group, "data.name") + site_name = self.get_site_name(NBDevice, name, f"{group_name}/{cluster_name}") + + if name in self.processed_host_names.get(site_name, list()) and obj not in self.objects_to_reevaluate: + log.warning(f"Host '{name}' for site '{site_name}' already parsed. " + "Make sure to use unique host names. 
Skipping") + return + + # add host to processed list + if self.processed_host_names.get(site_name) is None: + self.processed_host_names[site_name] = list() + + self.processed_host_names[site_name].append(name) + + # filter hosts by name + if self.passes_filter(name, self.settings.host_include_filter, self.settings.host_exclude_filter) is False: + return + + # + # Collecting data + # + + # collect all necessary data + manufacturer = get_string_or_none(obj.hardware_information.manufacturer) + model = get_string_or_none(obj.hardware_information.product_name) + platform = get_string_or_none(obj.version.full_version) + + # if the device vendor/model cannot be retrieved (due to problem on the host), + # set a dummy value so the host still gets synced + if manufacturer is None: + manufacturer = "Generic Vendor" + if model is None: + model = "Generic Model" + + # get status + status = "offline" + if obj.status == types.HostStatus.UP: + status = "active" + + # try to find serial + serial = get_string_or_none(obj.hardware_information.uuid) + + # add asset tag if desired and present + asset_tag = get_string_or_none(obj.hardware_information.serial_number) + + # get host_tenant_relation + tenant_name = self.get_object_relation(name, "host_tenant_relation") + + # get host_tag_relation + host_tags = self.get_object_relation(name, "host_tag_relation") + + # prepare host data model + host_data = { + "name": name, + "device_type": { + "model": model, + "manufacturer": { + "name": manufacturer + } + }, + "site": {"name": site_name}, + "cluster": nb_cluster_object, + "status": status + } + + # add data if present + if serial is not None: + host_data["serial"] = serial + if asset_tag is not None: + host_data["asset_tag"] = asset_tag + if platform is not None: + host_data["platform"] = {"name": platform} + if tenant_name is not None: + host_data["tenant"] = {"name": tenant_name} + if len(host_tags) > 0: + host_data["tags"] = host_tags + + # Get all NICs information + pnic_data_dict = 
dict() + vnic_data_dict = dict() + vnic_ips = dict() + + host_primary_ip4 = None + host_primary_ip6 = None + + for nic in obj.nics: + nic_type = "other" + if nic.speed: + nic_type = NetBoxInterfaceType(nic.speed/1000).get_this_netbox_type() + + mac_address = None + if nic.mac: + mac_address = nic.mac.address + if nic.base_interface is None and nic.bonding is None: + # Physical Interface + pnic_data = { + "name": nic.name, + "device": None, + "enabled": (nic.status == types.NicStatus.UP), + "type": nic_type, + "mtu": nic.mtu, + "mac_address": mac_address + } + pnic_data_dict[nic.name] = pnic_data + elif nic.bonding is not None: + # Bond + vnic_data = { + "name": nic.name, + "device": None, + "enabled": (nic.status == types.NicStatus.UP), + "type": "virtual", + "mtu": nic.mtu, + "mac_address": mac_address + } + vnic_data_dict[nic.name] = vnic_data + else: + # Bridge + vnic_data = { + "name": nic.name, + "device": None, + "enabled": (nic.status == types.NicStatus.UP), + "type": "virtual", + "mtu": nic.mtu, + "mac_address": mac_address, + "tagged_vlans": [{ + "name": f"VLAN-{nic.vlan.id}", + "vid": nic.vlan.id, + "site": { + "name": site_name + }}] + } + vnic_data_dict[nic.name] = vnic_data + # IP Info + if nic.ip is not None: + int_v4 = "{}/{}".format(nic.ip.address, nic.ip.netmask) + vnic_name = nic.name + if vnic_ips.get(vnic_name) is None: + vnic_ips[vnic_name] = list() + + if self.settings.permitted_subnets.permitted(int_v4, interface_name=vnic_name) is True: + vnic_ips[vnic_name].append(int_v4) + + if host_primary_ip4 is None and nic.ip.address == obj.address: + host_primary_ip4 = int_v4 + + if nic.ipv6 is not None: + int_v6 = "{}/{}".format(nic.ipv6.address, nic.ipv6.netmask) + vnic_name = nic.name + if vnic_ips.get(vnic_name) is None: + vnic_ips[vnic_name] = list() + + if self.settings.permitted_subnets.permitted(int_v6, interface_name=vnic_name) is True: + vnic_ips[vnic_name].append(int_v6) + + # add host to inventory + 
self.add_device_vm_to_inventory(NBDevice, object_data=host_data, pnic_data=pnic_data_dict, + vnic_data=vnic_data_dict, nic_ips=vnic_ips, + p_ipv4=host_primary_ip4, p_ipv6=host_primary_ip6, ovirt_object=obj) + + return + + def add_virtual_machine(self, obj): + """ + Parse a oVirt VM add to NetBox once all data is gathered. + + Parameters + ---------- + obj: types.Vm + virtual machine object to parse + """ + + name = obj.name + + if name is not None and self.settings.strip_vm_domain_name is True: + name = name.split(".")[0] + + # + # Filtering + # + + # get VM UUID + vm_uuid = obj.id + + if self.settings.set_vm_name_to_uuid: + display_name = name + name = vm_uuid + + log.debug(f"Parsing oVirt VM: {name}") + + # get VM power state + status = "active" if obj.status == types.VmStatus.UP else "offline" + + # ignore offline VMs during first run + if self.parsing_vms_the_first_time is True and status == "offline": + log.debug2(f"Ignoring {status} VM '{name}' on first run") + return + + # add to processed VMs + self.processed_vm_uuid.append(vm_uuid) + + parent_host = self.get_object_from_cache(obj.host) + nb_cluster_object = self.get_object_from_cache(obj.cluster) + + if self.settings.set_source_name_as_cluster_group is True: + group = self.inventory.get_by_data(NBClusterGroup, data={"name": self.name}) + else: + group = grab(nb_cluster_object, "data.group") + + if None in [parent_host, nb_cluster_object, group]: + log.error(f"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.") + return + + # check VM cluster + if nb_cluster_object is None: + log.debug(f"Virtual machine '{name}' is not part of a permitted cluster. Skipping") + return + + cluster_name = grab(nb_cluster_object, "data.name") + cluster_full_name = f"{group.name}/{cluster_name}" + + if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate: + log.warning(f"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. 
" + "Make sure to use unique VM names. Skipping") + return + + # add vm to processed list + if self.processed_vm_names.get(cluster_full_name) is None: + self.processed_vm_names[cluster_full_name] = list() + + self.processed_vm_names[cluster_full_name].append(name) + + # filter VMs by name + if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False: + return + + # + # Collect data + # + + site_name = nb_cluster_object.get_site_name() + # first check against vm_platform_relation + platform = obj.os.type + + if platform is not None: + platform = self.get_object_relation(platform, "vm_platform_relation", fallback=platform) + + disk = 0 + for disk_attachment in obj.disk_attachments: + disk_object = self.disk_cache[disk_attachment.disk.id] + disk += int(disk_object.provisioned_size) + + annotation = None + if self.settings.skip_vm_comments is False: + if not self.settings.set_vm_name_to_uuid: + annotation = obj.id + + # assign vm_tenant_relation + tenant_name = self.get_object_relation(name, "vm_tenant_relation") + + # assign vm_tag_relation + vm_tags = self.get_object_relation(name, "vm_tag_relation") + + vm_data = { + "name": name, + "cluster": nb_cluster_object, + "status": status, + "memory": int(int(obj.memory)/1024/1024), + "vcpus": obj.cpu.topology.cores*obj.cpu.topology.sockets, + "disk": int(disk/1024/1024/1024) + } + + # Add adaption for change in NetBox 3.3.0 VM model + # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758 + if version.parse(self.inventory.netbox_api_version) >= version.parse("3.3.0"): + vm_data["site"] = {"name": site_name} + if platform is not None: + vm_data["platform"] = {"name": platform} + if annotation is not None: + vm_data["comments"] = annotation + if tenant_name is not None: + vm_data["tenant"] = {"name": tenant_name} + if len(vm_tags) > 0: + vm_data["tags"] = vm_tags + + if self.settings.set_vm_name_to_uuid: + custom_field = 
self.add_update_custom_field({ + "name": "ovirt_vm_name", + "label": "name", + "content_types": "virtualization.virtualmachine", + "type": "text", + "description": f"oVirt '{self.name}' synced object attribute 'name'" + }) + vm_data["custom_fields"] = { + grab(custom_field, "data.name"): get_string_or_none(grab(obj, "name")) + } + + vm_primary_ip4 = None + vm_primary_ip6 = None + vm_nic_dict = dict() + nic_ips = dict() + + for reporteddevice in obj.reported_devices: + if reporteddevice.type is not types.ReportedDeviceType.NETWORK or reporteddevice.ips is None: + continue + + for ip in reporteddevice.ips: + nic = reporteddevice.name + ip_addr = ip.address + + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_address(ip_addr), site_name) + if matched_prefix is None: + matched_prefix = self.return_longest_matching_prefix_for_ip(ip_address(ip_addr)) + if matched_prefix is not None: + this_prefix = grab(matched_prefix, f"data.{NBPrefix.primary_key}") + ip_addr = f"{ip_addr}/{this_prefix.prefixlen}" + + if self.settings.permitted_subnets.permitted(ip_addr, interface_name=nic) is True: + if nic_ips.get(nic) is None: + nic_ips[nic] = list() + nic_ips[nic].append(ip_addr) + if ip.version == types.IpVersion.V4: + vm_primary_ip4 = ip_addr + if ip.version == types.IpVersion.V6: + vm_primary_ip6 = ip_addr + + vm_nic_data = { + "name": nic, + "virtual_machine": None, + "mac_address": normalize_mac_address(reporteddevice.mac.address), + "description": nic, + "enabled": True, + } + vm_nic_dict[nic] = vm_nic_data + else: + log.debug(f"Virtual machine '{name}' address '{ip_addr}' is not valid to add. 
Skipping") + + # add VM to inventory + self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=vm_nic_dict, + nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6) + + return + + def update_basic_data(self): + """ + + Returns + ------- + + """ + + # add source identification tag + self.inventory.add_update_object(NBTag, data={ + "name": self.source_tag, + "description": f"Marks objects synced from oVirt '{self.name}' " + f"({self.settings.url}) to this NetBox Instance." + }) + + # update virtual site if present + this_site_object = self.inventory.get_by_data(NBSite, data={"name": self.site_name}) + + if this_site_object is not None: + this_site_object.update(data={ + "name": self.site_name, + "comments": "A default virtual site created to house objects " + "that have been synced from this oVirt instance " + "and have no predefined site assigned." + }) + + server_role_object = self.inventory.get_by_data(NBDeviceRole, data={"name": "Server"}) + + if server_role_object is not None: + role_data = {"name": "Server", "vm_role": True} + if server_role_object.is_new is True: + role_data["color"] = "9e9e9e" + + server_role_object.update(data=role_data) + +# EOF diff --git a/settings-example.ini b/settings-example.ini index 69bb699..d1697e1 100644 --- a/settings-example.ini +++ b/settings-example.ini @@ -541,4 +541,132 @@ permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8 # This is useful as the names are not always unique #set_vm_name_to_uuid = False +[source/my-ovirt-example] + +# Defines if this source is enabled or not +#enabled = true + +# type of source. This defines which source handler to use. +type = ovirt + +# URL to the oVirt API +url = https://engine40.example.com/ovirt-engine/api + +# CA file path to validate oVirt certificate. +ca_file = "ca.pem" + +# Enforces TLS certificate validation. If Openstack API uses a valid TLS certificate then +# this option should be set to 'true' to ensure a secure connection. 
+#validate_tls_certs = false
+
+# username and password to use to log into OpenStack
+username = ovirtuser
+password = supersecret
+
+# IP networks eligible to be synced to NetBox.
+# If an IP address is not part of these networks then it WON'T be synced to NetBox
+permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8
+
+# filters can be used to include/exclude certain objects from importing into NetBox
+# Include filters are checked first and exclude filters after. An object name has to
+# pass both filters to be synced to NetBox. If a filter is unset it will be ignored.
+# Filters are all treated as regex expressions!
+
+# If a cluster is excluded from sync then ALL VMs and HOSTS inside the cluster will
+# be ignored!
+#cluster_exclude_filter =
+#cluster_include_filter =
+
+# This will only include/exclude the host, not the VM if Host is part of a multi host
+# cluster.
+#host_exclude_filter =
+#host_include_filter =
+
+# simply include/exclude VMs
+#vm_exclude_filter =
+#vm_include_filter =
+
+# This option defines which Openstack Availability Zones are part of a NetBox site. This is done
+# with a comma separated key = value list.
+# key: defines the cluster name as regex
+# value: defines the NetBox site name (use quotes if name contains commas)
+# This is a quite important config setting as IP addresses, prefixes, VLANs and
+# VRFs are site dependent. In order to assign the correct prefix to an IP
+# address it is important to pick the correct site.
+# A VM always depends on the cluster site relation
+#cluster_site_relation = Cluster_NYC = New York , Cluster_FFM.* = Frankfurt
+
+# Same as cluster site but on host level. If unset it will fall back
+# to cluster_site_relation.
+#host_site_relation = nyc02.* = New York, ffm01.* = Frankfurt
+
+# This option defines which cluster/host/VM belongs to which tenant. This is done
+# with a comma separated key = value list.
+# key: defines a hosts/VM name as regex
+# value: defines the NetBox tenant name (use quotes if name contains commas)
+#cluster_tenant_relation = Cluster_NYC.* = Customer A
+#host_tenant_relation = esxi300.* = Infrastructure
+#vm_tenant_relation = grafana.* = Infrastructure
+
+# This option defines custom platforms if the used Flavors are not suitable.
+# Pretty much a mapping of Openstack flavor name to your own platform name.
+# This is done with a comma separated key = value list.
+# key: defines an Openstack-returned flavor name
+# value: defines the desired NetBox platform name
+#vm_platform_relation = centos-7.* = centos7, microsoft-windows-server-2016.* = Windows2016
+
+# Define the NetBox device role used for hosts and VMs. The default is set to "Server". This is done
+# with a comma separated key = value list.
+# key: defines a hosts/VM name as regex
+# value: defines the NetBox role name (use quotes if name contains commas)
+#host_role_relation = .* = Server
+#vm_role_relation = .* = Server
+
+# Define NetBox tags which are assigned to a cluster, host or VM. This is done
+# with a comma separated key = value list.
+# key: defines a hosts/VM name as regex
+# value: defines the NetBox tag (use quotes if name contains commas)
+#cluster_tag_relation = Cluster_NYC.* = Infrastructure
+#host_tag_relation = esxi300.* = Infrastructure
+#vm_tag_relation = grafana.* = Infrastructure
+
+# Perform a reverse lookup for all collected IP addresses.
If a dns name +# was found it will be added to the IP address object in NetBox +#dns_name_lookup = True + +# use custom DNS server to do the reverse lookups +#custom_dns_servers = 192.168.1.11, 192.168.1.12 + +# define how the primary IPs should be set +# possible values +# +# always: will remove primary IP from the object where this address is +# currently set as primary and moves it to new object +# +# when-undefined: (default) +# only sets primary IP if undefined, will cause ERRORs if same IP is +# assigned more then once to different hosts and IP is set as the +# objects primary IP +# +# never: don't set any primary IPs, will cause the same ERRORs +# as "when-undefined" + +#set_primary_ip = when-undefined + +# Do not sync flavors from a VM in Openstack to the comments field on a VM in netbox +#skip_vm_platform = False + +# Do not sync ID from a VM in Openstack to the comments field on a VM in netbox +#skip_vm_comments = False + +# strip domain part from host name before syncing device to NetBox +#strip_host_domain_name = False + +# strip domain part from VM name before syncing VM to NetBox +#strip_vm_domain_name = False + +# Set name in Netbox to the UUID instead of the name in Openstack +# This is useful as the names are not always unique +#set_vm_name_to_uuid = False + ;EOF From f239d5070f03c194e5a1191b5d77ebb5f8eef09e Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Fri, 25 Aug 2023 14:05:08 +0200 Subject: [PATCH 11/12] Use OS version as platform --- module/sources/ovirt/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/sources/ovirt/connection.py b/module/sources/ovirt/connection.py index f1a584d..9ecf9f5 100644 --- a/module/sources/ovirt/connection.py +++ b/module/sources/ovirt/connection.py @@ -930,7 +930,7 @@ def add_host(self, obj): # collect all necessary data manufacturer = get_string_or_none(obj.hardware_information.manufacturer) model = get_string_or_none(obj.hardware_information.product_name) - platform = 
get_string_or_none(obj.version.full_version) + platform = f"{obj.os.type} {obj.os.version.full_version}" # if the device vendor/model cannot be retrieved (due to problem on the host), # set a dummy value so the host still gets synced From 66b491472afdd681bb2eefc461473336120df4a0 Mon Sep 17 00:00:00 2001 From: Jean-Louis Dupond Date: Wed, 30 Aug 2023 14:53:38 +0200 Subject: [PATCH 12/12] Fix missing tenant_name --- module/sources/openstack/connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/module/sources/openstack/connection.py b/module/sources/openstack/connection.py index 42aeb45..8398495 100644 --- a/module/sources/openstack/connection.py +++ b/module/sources/openstack/connection.py @@ -757,6 +757,10 @@ def add_cluster(self, obj): "site": {"name": site_name} } + tenant_name = self.get_object_relation(name, "cluster_tenant_relation") + if tenant_name is not None: + data["tenant"] = {"name": tenant_name} + # try to find cluster including cluster group log.debug2("Trying to find a matching existing cluster") cluster_object = None