From b1f34bf702c7f5436073d6ddcbbb12a429f23f7b Mon Sep 17 00:00:00 2001 From: Mustafa Kemal Gilor Date: Fri, 5 Jul 2024 18:55:10 +0300 Subject: [PATCH] hotsos/core: add name aliasing support at the moment, scenarios are using the long import paths in order to reference to a Python property. this feature allows assigning an alias to a Python class or variable in order to make using plugin property interfaces easier. Added aliases to the plugins. Updated existing scenarios to use aliases. Fixes #912 Signed-off-by: Mustafa Kemal Gilor --- hotsos/core/alias.py | 82 +++++++++++++++++++ hotsos/core/host_helpers/filestat.py | 2 + hotsos/core/host_helpers/ssl.py | 2 + hotsos/core/plugins/juju/common.py | 2 + hotsos/core/plugins/juju/resources.py | 2 + hotsos/core/plugins/kernel/common.py | 2 + hotsos/core/plugins/kernel/config.py | 2 + .../core/plugins/kernel/kernlog/calltrace.py | 2 + hotsos/core/plugins/kernel/kernlog/events.py | 2 + hotsos/core/plugins/kernel/memory.py | 6 ++ hotsos/core/plugins/kernel/net.py | 6 ++ hotsos/core/plugins/kernel/sysfs.py | 2 + hotsos/core/plugins/lxd/common.py | 3 +- hotsos/core/plugins/mysql.py | 2 + hotsos/core/plugins/openstack/common.py | 3 + hotsos/core/plugins/openstack/neutron.py | 3 + hotsos/core/plugins/openstack/nova.py | 2 + hotsos/core/plugins/openstack/octavia.py | 15 ++-- hotsos/core/plugins/openvswitch/ovs.py | 5 ++ hotsos/core/plugins/pacemaker.py | 2 + hotsos/core/plugins/rabbitmq/report.py | 2 + hotsos/core/plugins/sosreport.py | 2 + hotsos/core/plugins/storage/bcache.py | 4 + hotsos/core/plugins/storage/ceph.py | 6 ++ hotsos/core/plugins/system/system.py | 3 + .../core/ycheck/engine/properties/common.py | 26 ++++++ .../defs/scenarios/juju/bugs/lp1910958.yaml | 2 +- .../scenarios/juju/jujud_machine_checks.yaml | 2 +- .../defs/scenarios/kernel/amd_iommu_pt.yaml | 8 +- .../scenarios/kernel/kernlog_calltrace.yaml | 12 +-- hotsos/defs/scenarios/kernel/memory.yaml | 26 +++--- .../defs/scenarios/kernel/network/misc.yaml | 2 +- 
.../scenarios/kernel/network/netlink.yaml | 4 +- hotsos/defs/scenarios/kernel/network/tcp.yaml | 52 ++++++------ hotsos/defs/scenarios/kernel/network/udp.yaml | 22 ++--- .../kubernetes/system_cpufreq_mode.yaml | 4 +- hotsos/defs/scenarios/lxd/bugs/lp1807628.yaml | 4 +- hotsos/defs/scenarios/lxd/lxcfs_deadlock.yaml | 4 +- hotsos/defs/scenarios/mysql/mysql.yaml | 2 +- hotsos/defs/scenarios/openstack/eol.yaml | 4 +- .../openstack/neutron/bugs/lp1907686.yaml | 2 +- .../neutron/neutron_ovs_cleanup.yaml | 2 +- .../scenarios/openstack/nova/cpu_pinning.yaml | 30 +++---- .../openstack/octavia/hm_port_health.yaml | 10 +-- .../defs/scenarios/openstack/openstack.yaml | 2 +- .../openstack_apache2_certificates.yaml | 6 +- .../openstack/openstack_charm_conflicts.yaml | 2 +- .../pkgs_from_mixed_releases_found.yaml | 2 +- .../openstack/system_cpufreq_mode.yaml | 2 +- .../openstack/systemd_masked_services.yaml | 2 +- .../scenarios/openvswitch/dpdk_config.yaml | 16 ++-- .../openvswitch/dpif_lost_packets.yaml | 2 +- .../openvswitch/ovn/bfd_flapping.yaml | 2 +- .../ovn/ovn_central_certs_logs.yaml | 4 +- .../openvswitch/ovn/ovn_certs_valid.yaml | 12 +-- .../ovn/ovn_chassis_certs_logs.yaml | 4 +- .../openvswitch/ovn/ovn_upgrades.yaml | 2 +- .../openvswitch/service_restarts.yaml | 2 +- .../scenarios/pacemaker/bugs/lp1874719.yaml | 2 +- .../scenarios/rabbitmq/cluster_config.yaml | 2 +- .../scenarios/rabbitmq/cluster_resources.yaml | 2 +- .../scenarios/sosreport/plugin_timeouts.yaml | 2 +- .../defs/scenarios/storage/bcache/bcache.yaml | 2 +- .../defs/scenarios/storage/bcache/bdev.yaml | 8 +- .../scenarios/storage/bcache/cacheset.yaml | 8 +- ...th_insecure_global_id_reclaim_allowed.yaml | 2 +- .../storage/ceph/ceph-mon/autoscaler_bug.yaml | 2 +- .../storage/ceph/ceph-mon/bluefs_size.yaml | 4 +- .../ceph/ceph-mon/bluefs_spillover.yaml | 2 +- .../ceph/ceph-mon/ceph_address_overlap.yaml | 4 +- .../ceph/ceph-mon/ceph_cluster_health.yaml | 4 +- .../ceph/ceph-mon/ceph_versions_mismatch.yaml | 4 +- 
.../ceph/ceph-mon/crushmap_bucket_checks.yaml | 8 +- .../storage/ceph/ceph-mon/empty_clog.yaml | 4 +- .../scenarios/storage/ceph/ceph-mon/eol.yaml | 4 +- .../storage/ceph/ceph-mon/laggy_pgs.yaml | 2 +- .../ceph/ceph-mon/large_omap_objects.yaml | 4 +- .../storage/ceph/ceph-mon/mon_db_too_big.yaml | 2 +- .../ceph/ceph-mon/mon_elections_flapping.yaml | 4 +- .../storage/ceph/ceph-mon/osd_flapping.yaml | 4 +- .../ceph-mon/osd_maps_backlog_too_large.yaml | 2 +- .../ceph-mon/osd_messenger_v2_protocol.yaml | 4 +- .../storage/ceph/ceph-mon/osd_slow_ops.yaml | 4 +- .../ceph/ceph-mon/osd_unusual_raw.yaml | 4 +- .../storage/ceph/ceph-mon/pg_imbalance.yaml | 14 ++-- .../storage/ceph/ceph-mon/pg_overdose.yaml | 2 +- .../required_osd_release_mismatch.yaml | 4 +- .../storage/ceph/ceph-mon/rgw_frontend.yaml | 2 +- .../ceph/ceph-mon/ssds_using_bcache.yaml | 4 +- .../ceph/ceph-mon/unresponsive_mon_mgr.yaml | 12 +-- .../storage/ceph/ceph-osd/bugs/lp1936136.yaml | 6 +- .../storage/ceph/ceph-osd/bugs/lp1959649.yaml | 4 +- .../storage/ceph/ceph-osd/bugs/lp1996010.yaml | 4 +- .../storage/ceph/ceph-osd/bugs/lp2016845.yaml | 2 +- .../filestore_to_bluestore_upgrade.yaml | 2 +- .../ceph-osd/juju_ceph_no_bcache_tuning.yaml | 6 +- .../storage/ceph/ceph-osd/pg_overdose.yaml | 2 +- .../ssd_osds_no_discard.yaml.disabled | 2 +- .../ceph/ceph-osd/system_cpufreq_mode.yaml | 2 +- .../storage/ceph/ceph-rgw/bugs/lp1974138.yaml | 4 +- .../ceph/common/ceph_charm_conflicts.yaml | 2 +- hotsos/defs/scenarios/storage/storage.yaml | 2 +- .../scenarios/system/sssd-ad-tokengroups.yaml | 2 +- .../scenarios/system/unattended_upgrades.yaml | 2 +- 104 files changed, 394 insertions(+), 222 deletions(-) create mode 100644 hotsos/core/alias.py diff --git a/hotsos/core/alias.py b/hotsos/core/alias.py new file mode 100644 index 000000000..4ca5d32d4 --- /dev/null +++ b/hotsos/core/alias.py @@ -0,0 +1,82 @@ +"""Aliasing utilities.""" + +from hotsos.core.log import log + + +class AliasAlreadyInUseError(Exception): + def 
__init__(self, name): + self.message = f"Alias '{name}' already in use!" + + def __str__(self): + return self.message + + +class AliasRegistry: + """ + A class that provides a registry for aliasing Python things. + """ + + # A class-level dictionary to store registered aliases. + registry = {} + + @staticmethod + def register(name, decoratee): + """ + Register a function, method, or property under an alias. + + This method handles different types of Python objects and creates + appropriate wrappers or registrations based on the object type. + + Args: + name (str): The alias under which to register the decoratee. + decoratee (callable or property): The Python object to be + registered. + + Raises: + AliasAlreadyInUseError: If the alias name is already registered. + """ + isprop = isinstance(decoratee, property) + target = decoratee.fget if isprop else decoratee + + if name in AliasRegistry.registry: + raise AliasAlreadyInUseError(name) + + import_path = f"{target.__module__}.{target.__qualname__}" + log.debug("registering alias `%s` --> {%s}", name, import_path) + # Register full import path. + AliasRegistry.registry[name] = import_path + + @staticmethod + def resolve(the_alias): + """ + Retrieve a registered alias. + + Args: + the_alias (str): The alias to retrieve. + + Returns: + str or None: The full import path registered under the alias, + or None if the alias is not registered. + """ + + if the_alias not in AliasRegistry.registry: + return None + + value = AliasRegistry.registry[the_alias] + log.debug("alias %s resolved to %s", the_alias, value) + return value + + +def alias(argument): + """Create an alias for a property, function or a thing.""" + + def real_decorator(func): + """We're not wrapping the func as we don't want + to do anything at runtime. 
We just want to alias + `func` to some user-defined name and call it on-demand.""" + AliasRegistry.register(argument, func) + return func + + return real_decorator diff --git a/hotsos/core/host_helpers/filestat.py b/hotsos/core/host_helpers/filestat.py index 11b5ddd3e..5d020d248 100644 --- a/hotsos/core/host_helpers/filestat.py +++ b/hotsos/core/host_helpers/filestat.py @@ -3,6 +3,7 @@ from hotsos.core.config import HotSOSConfig from hotsos.core.factory import FactoryBase from hotsos.core.log import log +from hotsos.core.alias import alias class FileObj(): @@ -38,6 +39,7 @@ def size(self): return size +@alias("file") class FileFactory(FactoryBase): """ Factory to dynamically create FileObj objects using file path as input. diff --git a/hotsos/core/host_helpers/ssl.py b/hotsos/core/host_helpers/ssl.py index 5d759f21b..07ee0228c 100644 --- a/hotsos/core/host_helpers/ssl.py +++ b/hotsos/core/host_helpers/ssl.py @@ -7,6 +7,7 @@ from hotsos.core.factory import FactoryBase from hotsos.core.host_helpers.cli import CLIHelper from hotsos.core.log import log +from hotsos.core.alias import alias class SSLCertificate(): @@ -65,6 +66,7 @@ def certificate_expires_soon(self): return self.certificate.days_to_expire <= self.expire_days +@alias('sslcert') class SSLCertificatesFactory(FactoryBase): """ Factory to dynamically create SSLCertificate objects for given paths. 
diff --git a/hotsos/core/plugins/juju/common.py b/hotsos/core/plugins/juju/common.py index aa9edc7f3..27af45f19 100644 --- a/hotsos/core/plugins/juju/common.py +++ b/hotsos/core/plugins/juju/common.py @@ -3,6 +3,7 @@ from hotsos.core.host_helpers import PebbleHelper, SystemdHelper from hotsos.core.plugins.juju.resources import JujuBase from hotsos.core import plugintools +from hotsos.core.alias import alias SVC_VALID_SUFFIX = r'[0-9a-zA-Z-_]*' JUJU_SVC_EXPRS = [r'mongod{}'.format(SVC_VALID_SUFFIX), @@ -12,6 +13,7 @@ r'(?:^|[^\s])juju-db{}'.format(SVC_VALID_SUFFIX)] +@alias('juju') class JujuChecksBase(plugintools.PluginPartBase, JujuBase): plugin_name = 'juju' plugin_root_index = 12 diff --git a/hotsos/core/plugins/juju/resources.py b/hotsos/core/plugins/juju/resources.py index 3afadb5dd..16285b647 100644 --- a/hotsos/core/plugins/juju/resources.py +++ b/hotsos/core/plugins/juju/resources.py @@ -9,6 +9,7 @@ from hotsos.core.config import HotSOSConfig from hotsos.core.log import log from hotsos.core import utils +from hotsos.core.alias import alias class JujuMachine(): @@ -174,6 +175,7 @@ def __init__(self, name, version): self.version = int(version) +@alias('juju.base') class JujuBase(): CHARM_MANIFEST_GLOB = "agents/unit-*/state/deployer/manifests" diff --git a/hotsos/core/plugins/kernel/common.py b/hotsos/core/plugins/kernel/common.py index a7e36cbbf..1487c0bc7 100644 --- a/hotsos/core/plugins/kernel/common.py +++ b/hotsos/core/plugins/kernel/common.py @@ -5,8 +5,10 @@ from hotsos.core import host_helpers, plugintools from hotsos.core.config import HotSOSConfig from hotsos.core.plugins.kernel.config import KernelConfig +from hotsos.core.alias import alias +@alias('kernel') class KernelBase(): @cached_property diff --git a/hotsos/core/plugins/kernel/config.py b/hotsos/core/plugins/kernel/config.py index f824a9c70..1913d1d42 100644 --- a/hotsos/core/plugins/kernel/config.py +++ b/hotsos/core/plugins/kernel/config.py @@ -3,6 +3,7 @@ from hotsos.core.config import 
HotSOSConfig from hotsos.core import host_helpers +from hotsos.core.alias import alias class KernelConfig(host_helpers.ConfigBase): @@ -39,6 +40,7 @@ def _load(self): break +@alias('kernel.systemdconfig') class SystemdConfig(host_helpers.IniConfigBase): """Systemd configuration.""" diff --git a/hotsos/core/plugins/kernel/kernlog/calltrace.py b/hotsos/core/plugins/kernel/kernlog/calltrace.py index 192570910..fa7eb25cd 100644 --- a/hotsos/core/plugins/kernel/kernlog/calltrace.py +++ b/hotsos/core/plugins/kernel/kernlog/calltrace.py @@ -12,6 +12,7 @@ TraceTypeBase, KernLogBase, ) +from hotsos.core.alias import alias KERNLOG_TS = r'\[\s*\d+\.\d+\]' KERNLOG_PREFIX = (r'(?:\S+\s+\d+\s+[\d:]+\s+\S+\s+\S+:\s+)?{}'. @@ -454,6 +455,7 @@ def __iter__(self): yield from self.hungtasks +@alias("kernel.calltrace") class CallTraceManager(KernLogBase): def __init__(self, *args, **kwargs): diff --git a/hotsos/core/plugins/kernel/kernlog/events.py b/hotsos/core/plugins/kernel/kernlog/events.py index 0076d1d21..d2549e660 100644 --- a/hotsos/core/plugins/kernel/kernlog/events.py +++ b/hotsos/core/plugins/kernel/kernlog/events.py @@ -1,6 +1,7 @@ from hotsos.core.log import log from hotsos.core.plugins.kernel.kernlog.common import KernLogBase from hotsos.core.search import SearchDef +from hotsos.core.alias import alias class OverMTUDroppedPacketEvent(): @@ -11,6 +12,7 @@ def searchdef(self): hint='dropped', tag='over-mtu-dropped') +@alias('kernel.kernlog.events') class KernLogEvents(KernLogBase): def __init__(self, *args, **kwargs): diff --git a/hotsos/core/plugins/kernel/memory.py b/hotsos/core/plugins/kernel/memory.py index 26a61ed66..f99c240b1 100644 --- a/hotsos/core/plugins/kernel/memory.py +++ b/hotsos/core/plugins/kernel/memory.py @@ -3,6 +3,7 @@ from hotsos.core.config import HotSOSConfig from hotsos.core.utils import sorted_dict +from hotsos.core.alias import alias class _BaseProcKeyValue(): @@ -50,6 +51,7 @@ def __getattr__(self, key): format(key, self.__class__.__name__)) 
+@alias('kernel.vmstat') class VMStat(_BaseProcKeyValue): VALID_KEYS = ['compact_fail', 'compact_success'] @@ -58,6 +60,7 @@ def path(self): return os.path.join(HotSOSConfig.data_root, 'proc/vmstat') @property + @alias("kernel.vmstat.compaction_failures_pct") def compaction_failures_percent(self): if not os.path.exists(self.path): return 0 @@ -70,6 +73,7 @@ def compaction_failures_percent(self): return int(fail_count / (success_count / 100)) +@alias("kernel.meminfo") class MemInfo(_BaseProcKeyValue): VALID_KEYS = ['MemTotal', 'MemAvailable', 'Hugetlb', 'HugePages_Total', 'HugePages_Free'] @@ -111,6 +115,7 @@ def hugep_used_to_hugep_total_percentage(self): return round(100 - (self.HugePages_Free * 100) / self.HugePages_Total) +@alias("kernel.slab") class SlabInfo(): def __init__(self, filter_names=None): @@ -284,6 +289,7 @@ def high_order_seq(self): return count +@alias('kernel.memchecks') class MemoryChecks(): @property diff --git a/hotsos/core/plugins/kernel/net.py b/hotsos/core/plugins/kernel/net.py index 2fb762927..52e8186ad 100644 --- a/hotsos/core/plugins/kernel/net.py +++ b/hotsos/core/plugins/kernel/net.py @@ -6,6 +6,7 @@ from hotsos.core.host_helpers import SYSCtlFactory, CLIHelperFile from hotsos.core.log import log from hotsos.core.search import FileSearcher, SearchDef, ResultFieldInfo +from hotsos.core.alias import alias class ProcNetBase(abc.ABC): @@ -109,6 +110,7 @@ def __init__(self): 'proc/net/snmp')) +@alias('kernel.net.snmp.tcp') class SNMPTcp(SNMPBase): def _percent_in_segs(self, field): @@ -154,6 +156,7 @@ def __getattr__(self, fld): return super().__getattr__(fld) +@alias('kernel.net.snmp.udp') class SNMPUdp(SNMPBase): @property @@ -208,6 +211,7 @@ def __init__(self): self.net_snmp_tcp = SNMPTcp() +@alias('kernel.net.netstat.tcp') class NetStatTCP(NetStatBase): @property @@ -286,6 +290,7 @@ def __getattr__(self, fld): return super().__getattr__(fld) +@alias('kernel.net.sockstat') class SockStat(ProcNetBase): """ @@ -534,6 +539,7 @@ def 
all_with_inode(self, inode): return list(filter(lambda x: (x.NODE == inode), self.data)) +@alias('kernel.net.netlink') class NetLink(STOVParserBase): """ Provides a way to extract fields from /proc/net/netlink. diff --git a/hotsos/core/plugins/kernel/sysfs.py b/hotsos/core/plugins/kernel/sysfs.py index aaaa220c2..9cc41a7c5 100644 --- a/hotsos/core/plugins/kernel/sysfs.py +++ b/hotsos/core/plugins/kernel/sysfs.py @@ -4,6 +4,7 @@ from hotsos.core.config import HotSOSConfig from hotsos.core import host_helpers from hotsos.core.plugins.system.system import SystemBase +from hotsos.core.alias import alias class SYSFSBase(): @@ -23,6 +24,7 @@ def get(relpath): return fd.read().strip() +@alias('kernel.sysfs.cpu') class CPU(SYSFSBase): @property diff --git a/hotsos/core/plugins/lxd/common.py b/hotsos/core/plugins/lxd/common.py index 79de71a90..8b4330ecf 100644 --- a/hotsos/core/plugins/lxd/common.py +++ b/hotsos/core/plugins/lxd/common.py @@ -11,13 +11,14 @@ FileSearcher, SearchDef, SequenceSearchDef ) - +from hotsos.core.alias import alias CORE_APT = ['lxd', 'lxc'] CORE_SNAPS = [r"(?:snap\.)?{}".format(p) for p in CORE_APT] SERVICE_EXPRS = [r"{}\S*".format(s) for s in CORE_SNAPS] +@alias('lxd') class LXD(): @cached_property diff --git a/hotsos/core/plugins/mysql.py b/hotsos/core/plugins/mysql.py index 6142294e1..a93897970 100644 --- a/hotsos/core/plugins/mysql.py +++ b/hotsos/core/plugins/mysql.py @@ -11,12 +11,14 @@ host_helpers, plugintools, ) +from hotsos.core.alias import alias SVC_VALID_SUFFIX = r'[0-9a-zA-Z-_]*' MYSQL_SVC_EXPRS = [r'mysql{}'.format(SVC_VALID_SUFFIX)] CORE_APT = ['mysql'] +@alias('mysql') class MySQLChecksBase(plugintools.PluginPartBase): plugin_name = 'mysql' plugin_root_index = 3 diff --git a/hotsos/core/plugins/openstack/common.py b/hotsos/core/plugins/openstack/common.py index 23b988a07..5ce12bfde 100644 --- a/hotsos/core/plugins/openstack/common.py +++ b/hotsos/core/plugins/openstack/common.py @@ -26,8 +26,10 @@ from 
hotsos.core.plugins.openstack.octavia import OctaviaBase from hotsos.core import plugintools from hotsos.core.ycheck.events import EventHandlerBase, EventCallbackBase +from hotsos.core.alias import alias +@alias('openstack') class OpenstackBase(): def __init__(self, *args, **kwargs): @@ -239,6 +241,7 @@ def apache2_allow_encoded_slashes_on(self): return False +@alias('openstack.checks') class OpenstackChecksBase(OpenstackBase, plugintools.PluginPartBase): plugin_name = "openstack" plugin_root_index = 4 diff --git a/hotsos/core/plugins/openstack/neutron.py b/hotsos/core/plugins/openstack/neutron.py index 5dcefe99b..e67c04832 100644 --- a/hotsos/core/plugins/openstack/neutron.py +++ b/hotsos/core/plugins/openstack/neutron.py @@ -9,6 +9,7 @@ OpenstackConfig, OSTServiceBase, ) +from hotsos.core.alias import alias # See https://github.com/openstack/neutron-lib/blob/master/neutron_lib/constants.py#L346 # noqa, pylint: disable=C0301 IP_HEADER_BYTES = 20 @@ -44,6 +45,7 @@ def bind_interfaces(self): return interfaces +@alias('neutron.service_checks') class ServiceChecks(): @cached_property @@ -128,6 +130,7 @@ def find_router_with_vr_id(self, vr_id): return None +@alias('neutron.config') class Config(FactoryBase): def __getattr__(self, path): diff --git a/hotsos/core/plugins/openstack/nova.py b/hotsos/core/plugins/openstack/nova.py index 23e73fcca..af9f3196b 100644 --- a/hotsos/core/plugins/openstack/nova.py +++ b/hotsos/core/plugins/openstack/nova.py @@ -22,6 +22,7 @@ NUMAInfo, SystemBase, ) +from hotsos.core.alias import alias class NovaBase(OSTServiceBase): @@ -223,6 +224,7 @@ def vcpu_info(self): return vcpu_info +@alias('openstack.nova.cpupinning') class CPUPinning(NovaBase): def __init__(self): diff --git a/hotsos/core/plugins/openstack/octavia.py b/hotsos/core/plugins/openstack/octavia.py index e6d28e477..a4a4c4658 100644 --- a/hotsos/core/plugins/openstack/octavia.py +++ b/hotsos/core/plugins/openstack/octavia.py @@ -1,12 +1,14 @@ from functools import 
cached_property from hotsos.core.plugins.openstack.openstack import OSTServiceBase - -OCTAVIA_HM_PORT_NAME = 'o-hm0' +from hotsos.core.alias import alias +@alias('openstack.octavia') class OctaviaBase(OSTServiceBase): + OCTAVIA_HM_PORT_NAME = 'o-hm0' + def __init__(self, *args, **kwargs): super().__init__('octavia', *args, **kwargs) @@ -17,15 +19,16 @@ def bind_interfaces(self): keyed by config key used to identify interface. """ interfaces = {} - port = self.nethelp.get_interface_with_name(OCTAVIA_HM_PORT_NAME) + port = self.nethelp.get_interface_with_name( + OctaviaBase.OCTAVIA_HM_PORT_NAME) if port: - interfaces.update({OCTAVIA_HM_PORT_NAME: port}) + interfaces.update({OctaviaBase.OCTAVIA_HM_PORT_NAME: port}) return interfaces @property def hm_port_has_address(self): - port = self.bind_interfaces.get(OCTAVIA_HM_PORT_NAME) + port = self.bind_interfaces.get(OctaviaBase.OCTAVIA_HM_PORT_NAME) if port is None or not port.addresses: return False @@ -33,7 +36,7 @@ def hm_port_has_address(self): @cached_property def hm_port_healthy(self): - port = self.bind_interfaces.get(OCTAVIA_HM_PORT_NAME) + port = self.bind_interfaces.get(OctaviaBase.OCTAVIA_HM_PORT_NAME) if port is None: return True diff --git a/hotsos/core/plugins/openvswitch/ovs.py b/hotsos/core/plugins/openvswitch/ovs.py index dc1e01dab..3c1c38840 100644 --- a/hotsos/core/plugins/openvswitch/ovs.py +++ b/hotsos/core/plugins/openvswitch/ovs.py @@ -18,6 +18,7 @@ create_constraint, ) from hotsos.core.plugins.openvswitch.common import OpenvSwitchGlobalSearch +from hotsos.core.alias import alias class OVSDBTable(): @@ -85,6 +86,7 @@ def __getattr__(self, column): return self.get(record='.', column=column) +@alias('openvswitch.db') class OVSDB(FactoryBase): """ This class is used like a factory in that attributes are table names that @@ -94,6 +96,7 @@ def __getattr__(self, table): return OVSDBTable(table) +@alias('openvswitch.dplookups') class OVSDPLookups(): def __init__(self): @@ -245,6 +248,7 @@ def paths(cls): 
'var/log/openvswitch/ovs-vswitchd.log')] +@alias("openvswitch.bfd") class OVSBFD(OpenvSwitchBase): @property @@ -298,6 +302,7 @@ def max_transitions_last_24h_within_hour(self): for port in self._transitions.values())) +@alias('openvswitch.dpdk') class OVSDPDK(OpenvSwitchBase): @cached_property diff --git a/hotsos/core/plugins/pacemaker.py b/hotsos/core/plugins/pacemaker.py index b523f366f..d508e2c4d 100644 --- a/hotsos/core/plugins/pacemaker.py +++ b/hotsos/core/plugins/pacemaker.py @@ -7,12 +7,14 @@ SystemdHelper, ) from hotsos.core.plugintools import PluginPartBase +from hotsos.core.alias import alias PACEMAKER_PKGS_CORE = ['pacemaker', r'pacemaker-\S+', 'crmsh', 'corosync'] PACEMAKER_SVC_EXPR = ['pacemaker[a-zA-Z-]*', 'corosync'] +@alias('pacemaker') class PacemakerBase(): @cached_property diff --git a/hotsos/core/plugins/rabbitmq/report.py b/hotsos/core/plugins/rabbitmq/report.py index d64c3dcdd..54877602c 100644 --- a/hotsos/core/plugins/rabbitmq/report.py +++ b/hotsos/core/plugins/rabbitmq/report.py @@ -8,8 +8,10 @@ FileSearcher, ) from hotsos.core.host_helpers import CLIHelperFile +from hotsos.core.alias import alias +@alias('rabbitmq') class RabbitMQReport(): """ Class providing easy access to the contents of a rabbitmqctl report. 
diff --git a/hotsos/core/plugins/sosreport.py b/hotsos/core/plugins/sosreport.py index f189fb08b..3511549fd 100644 --- a/hotsos/core/plugins/sosreport.py +++ b/hotsos/core/plugins/sosreport.py @@ -8,10 +8,12 @@ SearchDef, FileSearcher, ) +from hotsos.core.alias import alias CORE_APT = ['sosreport'] +@alias('sosreport') class SOSReportChecksBase(PluginPartBase): plugin_name = 'sosreport' plugin_root_index = 2 diff --git a/hotsos/core/plugins/storage/bcache.py b/hotsos/core/plugins/storage/bcache.py index 083b8449a..11e68b933 100644 --- a/hotsos/core/plugins/storage/bcache.py +++ b/hotsos/core/plugins/storage/bcache.py @@ -13,6 +13,7 @@ SearchDef ) from hotsos.core.utils import sort_suffixed_integers +from hotsos.core.alias import alias class BcacheConfig(ConfigBase): @@ -87,6 +88,7 @@ def __getattr__(self, key): raise AttributeError("{} not found in cacheset config".format(key)) +@alias('bcache') class BcacheBase(StorageBase): def __init__(self, *args, **kwargs): @@ -186,6 +188,7 @@ def is_bcache_device(self, dev): return False +@alias('bcache.bdevsinfo') class BDevsInfo(BcacheBase): def _get_parameter(self, key): @@ -229,6 +232,7 @@ def writeback_percent(self): return sorted(list(map(int, ret))) +@alias('bcache.cachesetsinfo') class CachesetsInfo(BcacheBase): def _get_parameter(self, key): diff --git a/hotsos/core/plugins/storage/ceph.py b/hotsos/core/plugins/storage/ceph.py index 540b9ecc9..6f2a2ff2a 100644 --- a/hotsos/core/plugins/storage/ceph.py +++ b/hotsos/core/plugins/storage/ceph.py @@ -34,6 +34,7 @@ seconds_to_date, ) from hotsos.core.ycheck.events import EventCallbackBase +from hotsos.core.alias import alias CEPH_SERVICES_EXPRS = [r"ceph-[a-z0-9-]+", r"rados[a-z0-9-:]+", @@ -93,6 +94,7 @@ def csv_to_set_inner(*args, **kwargs): return csv_to_set_inner +@alias('ceph.config') class CephConfig(IniConfigBase): def __init__(self, *args, **kwargs): path = os.path.join(HotSOSConfig.data_root, 'etc/ceph/ceph.conf') @@ -136,6 +138,7 @@ def public_network_set(self): 
return self.get('public network') +@alias('ceph.crushmap') class CephCrushMap(): @staticmethod @@ -340,6 +343,7 @@ def is_rgw_using_civetweb(self): return False +@alias('ceph.cluster') class CephCluster(): OSD_META_LIMIT_PERCENT = 5 OSD_PG_MAX_LIMIT = 500 @@ -934,6 +938,7 @@ def devtype(self): return _devtype +@alias('ceph') class CephChecksBase(StorageBase): def __init__(self, *args, **kwargs): @@ -1221,6 +1226,7 @@ def __getattr__(self, name=None): return list(vals) +@alias('ceph.daemon.all-osds') class CephDaemonAllOSDsFactory(FactoryBase): """ A factory interface to allow dynamic access to ceph daemon commands and diff --git a/hotsos/core/plugins/system/system.py b/hotsos/core/plugins/system/system.py index dc49412e9..9abf715a8 100644 --- a/hotsos/core/plugins/system/system.py +++ b/hotsos/core/plugins/system/system.py @@ -13,6 +13,7 @@ FileSearcher, SearchDef, SequenceSearchDef ) +from hotsos.core.alias import alias class NUMAInfo(): @@ -75,6 +76,7 @@ def cores(self, node=None): return self.nodes.get(node) +@alias('system') class SystemBase(): @cached_property @@ -232,6 +234,7 @@ def sysctl_all(self): return SYSCtlFactory().sysctl_all +@alias('sssd') class SSSD(): def __init__(self): diff --git a/hotsos/core/ycheck/engine/properties/common.py b/hotsos/core/ycheck/engine/properties/common.py index 8e21f8d83..dc9694c79 100644 --- a/hotsos/core/ycheck/engine/properties/common.py +++ b/hotsos/core/ycheck/engine/properties/common.py @@ -10,6 +10,7 @@ ) from hotsos.core.log import log from hotsos.core.host_helpers.config import ConfigBase +from hotsos.core.alias import AliasRegistry class ImportPathIsNotAClass(Exception): @@ -393,6 +394,7 @@ def get_cls(self, import_str): self._add_to_import_cache(import_str, ret) return ret + # pylint: disable-next=too-many-statements def get_property(self, import_str): """ Import and fetch value of a Python property or factory. 
@@ -407,6 +409,30 @@ def get_property(self, import_str): @param import_str: a path to a Python property or Factory. """ + + # Try to resolve full alias first. + resolved_path = AliasRegistry.resolve(import_str) + if resolved_path: + import_str = resolved_path + else: + log.debug("trying to resolve `%s` as class path alias", import_str) + + # Try to resolve as class alias. + lhs, sep, prop_arg = import_str.partition(":") + class_path, property_path = lhs.rsplit(".", maxsplit=1) + log.debug("assuming class path %s and property %s", + class_path, property_path) + resolved_path = AliasRegistry.resolve(class_path) + if resolved_path: + import_str = f"{resolved_path}.{property_path}{sep}{prop_arg}" + log.debug( + "resolved class alias `%s` to `%s`." + " final import string is %s", + class_path, + resolved_path, + import_str, + ) + ret = self._load_from_import_cache(import_str) if ret: log.debug("calling property %s (from_cache=True)", import_str) diff --git a/hotsos/defs/scenarios/juju/bugs/lp1910958.yaml b/hotsos/defs/scenarios/juju/bugs/lp1910958.yaml index 844a7839e..928856b75 100644 --- a/hotsos/defs/scenarios/juju/bugs/lp1910958.yaml +++ b/hotsos/defs/scenarios/juju/bugs/lp1910958.yaml @@ -17,4 +17,4 @@ conclusions: format-dict: units: '@checks.has_lp1910958.search.results_group_1:unique_comma_join' rels: '@checks.has_lp1910958.search.results_group_2:unique_comma_join' - juju_version: 'hotsos.core.plugins.juju.resources.JujuBase.version' + juju_version: 'juju.base.version' diff --git a/hotsos/defs/scenarios/juju/jujud_machine_checks.yaml b/hotsos/defs/scenarios/juju/jujud_machine_checks.yaml index 1b52e64f4..8c22ba5ad 100644 --- a/hotsos/defs/scenarios/juju/jujud_machine_checks.yaml +++ b/hotsos/defs/scenarios/juju/jujud_machine_checks.yaml @@ -1,7 +1,7 @@ checks: jujud_not_found: property: - path: hotsos.core.plugins.juju.JujuChecksBase.systemd_processes + path: juju.systemd_processes ops: [[contains, jujud], [not_]] conclusions: jujud-not-found: diff --git 
a/hotsos/defs/scenarios/kernel/amd_iommu_pt.yaml b/hotsos/defs/scenarios/kernel/amd_iommu_pt.yaml index 3861eedac..1ab270c3d 100644 --- a/hotsos/defs/scenarios/kernel/amd_iommu_pt.yaml +++ b/hotsos/defs/scenarios/kernel/amd_iommu_pt.yaml @@ -1,7 +1,7 @@ vars: - virt_type: '@hotsos.core.plugins.system.SystemBase.virtualisation_type' - cpu_vendor: '@hotsos.core.plugins.kernel.sysfs.CPU.vendor' - kernel_cmd_line: '@hotsos.core.plugins.kernel.KernelBase.boot_parameters' + virt_type: '@system.virtualisation_type' + cpu_vendor: '@kernel.sysfs.cpu.vendor' + kernel_cmd_line: '@kernel.boot_parameters' checks: is_phy_host: varops: [[$virt_type], [not_]] @@ -22,4 +22,4 @@ conclusions: passthrough mode (e.g. set iommu=pt in boot parameters) which is recommended in order to get the best performance e.g. for networking. format-dict: - cpu_model: hotsos.core.plugins.kernel.sysfs.CPU.model + cpu_model: kernel.sysfs.cpu.model diff --git a/hotsos/defs/scenarios/kernel/kernlog_calltrace.yaml b/hotsos/defs/scenarios/kernel/kernlog_calltrace.yaml index df3c596ee..030ee71ca 100644 --- a/hotsos/defs/scenarios/kernel/kernlog_calltrace.yaml +++ b/hotsos/defs/scenarios/kernel/kernlog_calltrace.yaml @@ -1,23 +1,23 @@ checks: has_stacktraces: property: - path: hotsos.core.plugins.kernel.CallTraceManager.calltrace_anytype + path: kernel.calltrace.calltrace_anytype ops: [[length_hint]] has_oom_killer_invoked: property: - path: hotsos.core.plugins.kernel.CallTraceManager.oom_killer + path: kernel.calltrace.oom_killer ops: [[length_hint]] has_bcache_deadlock_invoked: property: - path: hotsos.core.plugins.kernel.CallTraceManager.calltrace-bcache + path: kernel.calltrace.calltrace-bcache ops: [[length_hint]] has_hungtasks: property: - path: hotsos.core.plugins.kernel.CallTraceManager.calltrace_hungtask + path: kernel.calltrace.calltrace_hungtask ops: [[length_hint]] has_fanotify_hang: property: - path: hotsos.core.plugins.kernel.CallTraceManager.calltrace-fanotify + path: 
kernel.calltrace.calltrace-fanotify ops: [[length_hint]] conclusions: stacktraces: @@ -51,7 +51,7 @@ conclusions: See https://www.spinics.net/lists/stable/msg566639.html for full detail. format-dict: - kver: hotsos.core.plugins.kernel.KernelBase.version + kver: kernel.version hungtasks: priority: 2 decision: has_hungtasks diff --git a/hotsos/defs/scenarios/kernel/memory.yaml b/hotsos/defs/scenarios/kernel/memory.yaml index 49ef9ac6b..0a68f5350 100644 --- a/hotsos/defs/scenarios/kernel/memory.yaml +++ b/hotsos/defs/scenarios/kernel/memory.yaml @@ -1,23 +1,19 @@ vars: - nodes_with_limited_high_order_memory: - '@hotsos.core.plugins.kernel.memory.MemoryChecks.nodes_with_limited_high_order_memory' - compact_success: '@hotsos.core.plugins.kernel.memory.VMStat.compact_success' - compaction_failures_percent: '@hotsos.core.plugins.kernel.memory.VMStat.compaction_failures_percent' - slab_major_consumers: '@hotsos.core.plugins.kernel.memory.SlabInfo.major_consumers' + nodes_with_limited_high_order_memory: '@kernel.memchecks.nodes_with_limited_high_order_memory' + compact_success: '@kernel.vmstat.compact_success' + compaction_failures_percent: '@kernel.vmstat.compaction_failures_pct' + slab_major_consumers: '@kernel.slab.major_consumers' # We use an arbitrary threshold of 10k to suggest that a lot of # compaction has occurred but noting that this is a rolling counter # and is not necessarily representative of current state. 
min_compaction_success: 10000 max_compaction_failures_pcent: 10 - hugetlb_to_mem_total_percentage: - '@hotsos.core.plugins.kernel.memory.MemInfo.hugetlb_to_mem_total_percentage' - mem_avail_to_mem_total_percentage: - '@hotsos.core.plugins.kernel.memory.MemInfo.mem_avail_to_mem_total_percentage' - hugep_used_to_hugep_total_percentage: - '@hotsos.core.plugins.kernel.memory.MemInfo.hugep_used_to_hugep_total_percentage' - mem_total_gb: '@hotsos.core.plugins.kernel.memory.MemInfo.mem_total_gb' - mem_available_gb: '@hotsos.core.plugins.kernel.memory.MemInfo.mem_available_gb' - hugetlb_gb: '@hotsos.core.plugins.kernel.memory.MemInfo.hugetlb_gb' + hugetlb_to_mem_total_percentage: '@kernel.meminfo.hugetlb_to_mem_total_percentage' + mem_avail_to_mem_total_percentage: '@kernel.meminfo.mem_avail_to_mem_total_percentage' + hugep_used_to_hugep_total_percentage: '@kernel.meminfo.hugep_used_to_hugep_total_percentage' + mem_total_gb: '@kernel.meminfo.mem_total_gb' + mem_available_gb: '@kernel.meminfo.mem_available_gb' + hugetlb_gb: '@kernel.meminfo.hugetlb_gb' # Arbitrary thresholds set for the memory allocated for the huge # pages to total memory and memory available to total memory. 
hugetlb_to_mem_total_threshold_percent: 80 @@ -29,7 +25,7 @@ checks: - varops: [[$compact_success], [gt, $min_compaction_success]] - varops: [[$compaction_failures_percent], [gt, $max_compaction_failures_pcent]] too_many_free_hugepages: - - property: hotsos.core.plugins.kernel.memory.MemInfo.huge_pages_enabled + - property: kernel.meminfo.huge_pages_enabled - varops: [[$hugetlb_to_mem_total_percentage], [gt, $hugetlb_to_mem_total_threshold_percent]] - varops: [[$mem_avail_to_mem_total_percentage], [lt, $mem_available_to_mem_total_thershold_percent]] conclusions: diff --git a/hotsos/defs/scenarios/kernel/network/misc.yaml b/hotsos/defs/scenarios/kernel/network/misc.yaml index 1243c624d..7be0d9d86 100644 --- a/hotsos/defs/scenarios/kernel/network/misc.yaml +++ b/hotsos/defs/scenarios/kernel/network/misc.yaml @@ -8,7 +8,7 @@ checks: # "Jun 08 10:48:13 compute4 kernel:" expr: '(\w{3,5}\s+\d{1,2}\s+[\d:]+)\S+.+ nf_conntrack: table full, dropping packet' has_over_mtu_dropped_packets: - property: hotsos.core.plugins.kernel.kernlog.KernLogEvents.over_mtu_dropped_packets + property: kernel.kernlog.events.over_mtu_dropped_packets conclusions: nf-conntrack-full: decision: has_nf_conntrack_full diff --git a/hotsos/defs/scenarios/kernel/network/netlink.yaml b/hotsos/defs/scenarios/kernel/network/netlink.yaml index e67907faa..5223b68c4 100644 --- a/hotsos/defs/scenarios/kernel/network/netlink.yaml +++ b/hotsos/defs/scenarios/kernel/network/netlink.yaml @@ -1,6 +1,6 @@ checks: has_socks_with_drops: - property: hotsos.core.plugins.kernel.net.NetLink.all_with_drops + property: kernel.net.netlink.all_with_drops conclusions: netlink-socks-with-drops: decision: has_socks_with_drops @@ -14,4 +14,4 @@ conclusions: This may be a symptom of problems in the associated process(es) and should be investigated further. 
format-dict: - socks_with_drops: hotsos.core.plugins.kernel.net.NetLink.all_with_drops_str + socks_with_drops: kernel.net.netlink.all_with_drops_str diff --git a/hotsos/defs/scenarios/kernel/network/tcp.yaml b/hotsos/defs/scenarios/kernel/network/tcp.yaml index c57c7c2d9..d0ceefce1 100644 --- a/hotsos/defs/scenarios/kernel/network/tcp.yaml +++ b/hotsos/defs/scenarios/kernel/network/tcp.yaml @@ -1,30 +1,30 @@ vars: - incsumerr: '@hotsos.core.plugins.kernel.net.SNMPTcp.InCsumErrors' - incsumrate_pcent: '@hotsos.core.plugins.kernel.net.SNMPTcp.InCsumErrorsPcentInSegs' - outsegs: '@hotsos.core.plugins.kernel.net.SNMPTcp.OutSegs' - retrans: '@hotsos.core.plugins.kernel.net.SNMPTcp.RetransSegs' - outretrans_pcent: '@hotsos.core.plugins.kernel.net.SNMPTcp.RetransSegsPcentOutSegs' - spurrtx: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPSpuriousRtxHostQueues' - spurrtx_pcent: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPSpuriousRtxHostQueuesPcentOutSegs' - prunec: '@hotsos.core.plugins.kernel.net.NetStatTCP.PruneCalled' - rcvcoll: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPRcvCollapsed' - rcvpr: '@hotsos.core.plugins.kernel.net.NetStatTCP.RcvPruned' - ofopr: '@hotsos.core.plugins.kernel.net.NetStatTCP.OfoPruned' - backlogd: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPBacklogDrop' - rpfilterd: '@hotsos.core.plugins.kernel.net.NetStatTCP.IPReversePathFilter' - ldrop: '@hotsos.core.plugins.kernel.net.NetStatTCP.ListenDrops' - pfmemd: '@hotsos.core.plugins.kernel.net.NetStatTCP.PFMemallocDrop' - minttld: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPMinTTLDrop' - listenovf: '@hotsos.core.plugins.kernel.net.NetStatTCP.ListenOverflows' - ofod: '@hotsos.core.plugins.kernel.net.NetStatTCP.OfoPruned' - zwind: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPZeroWindowDrop' - rcvqd: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPRcvQDrop' - rcvqd_pcent: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPRcvQDropPcentInSegs' - rqfulld: 
'@hotsos.core.plugins.kernel.net.NetStatTCP.TCPReqQFullDrop' - rqfullcook: '@hotsos.core.plugins.kernel.net.NetStatTCP.TCPReqQFullDoCookies' - memusage_pages_inuse: '@hotsos.core.plugins.kernel.net.SockStat.GlobTcpSocksTotalMemPages' - memusage_pages_max: '@hotsos.core.plugins.kernel.net.SockStat.SysctlTcpMemMax' - memusage_pct: '@hotsos.core.plugins.kernel.net.SockStat.TCPMemUsagePct' + incsumerr: '@kernel.net.snmp.tcp.InCsumErrors' + incsumrate_pcent: '@kernel.net.snmp.tcp.InCsumErrorsPcentInSegs' + outsegs: '@kernel.net.snmp.tcp.OutSegs' + retrans: '@kernel.net.snmp.tcp.RetransSegs' + outretrans_pcent: '@kernel.net.snmp.tcp.RetransSegsPcentOutSegs' + spurrtx: '@kernel.net.netstat.tcp.TCPSpuriousRtxHostQueues' + spurrtx_pcent: '@kernel.net.netstat.tcp.TCPSpuriousRtxHostQueuesPcentOutSegs' + prunec: '@kernel.net.netstat.tcp.PruneCalled' + rcvcoll: '@kernel.net.netstat.tcp.TCPRcvCollapsed' + rcvpr: '@kernel.net.netstat.tcp.RcvPruned' + ofopr: '@kernel.net.netstat.tcp.OfoPruned' + backlogd: '@kernel.net.netstat.tcp.TCPBacklogDrop' + rpfilterd: '@kernel.net.netstat.tcp.IPReversePathFilter' + ldrop: '@kernel.net.netstat.tcp.ListenDrops' + pfmemd: '@kernel.net.netstat.tcp.PFMemallocDrop' + minttld: '@kernel.net.netstat.tcp.TCPMinTTLDrop' + listenovf: '@kernel.net.netstat.tcp.ListenOverflows' + ofod: '@kernel.net.netstat.tcp.OfoPruned' + zwind: '@kernel.net.netstat.tcp.TCPZeroWindowDrop' + rcvqd: '@kernel.net.netstat.tcp.TCPRcvQDrop' + rcvqd_pcent: '@kernel.net.netstat.tcp.TCPRcvQDropPcentInSegs' + rqfulld: '@kernel.net.netstat.tcp.TCPReqQFullDrop' + rqfullcook: '@kernel.net.netstat.tcp.TCPReqQFullDoCookies' + memusage_pages_inuse: '@kernel.net.sockstat.GlobTcpSocksTotalMemPages' + memusage_pages_max: '@kernel.net.sockstat.SysctlTcpMemMax' + memusage_pct: '@kernel.net.sockstat.TCPMemUsagePct' checks: incsumerr_high: or: diff --git a/hotsos/defs/scenarios/kernel/network/udp.yaml b/hotsos/defs/scenarios/kernel/network/udp.yaml index d970562ba..9b1c86f01 100644 --- 
a/hotsos/defs/scenarios/kernel/network/udp.yaml +++ b/hotsos/defs/scenarios/kernel/network/udp.yaml @@ -1,15 +1,15 @@ vars: - inerrors: '@hotsos.core.plugins.kernel.net.SNMPUdp.InErrors' - inerrors_pcent: '@hotsos.core.plugins.kernel.net.SNMPUdp.InErrorsPcentInDatagrams' - rcvbuferrors: '@hotsos.core.plugins.kernel.net.SNMPUdp.RcvbufErrors' - rcvbuferrors_pcent: '@hotsos.core.plugins.kernel.net.SNMPUdp.RcvbufErrorsPcentInDatagrams' - sndbuferrors: '@hotsos.core.plugins.kernel.net.SNMPUdp.SndbufErrors' - sndbuferrors_pcent: '@hotsos.core.plugins.kernel.net.SNMPUdp.SndbufErrorsPcentOutDatagrams' - incsumerrors: '@hotsos.core.plugins.kernel.net.SNMPUdp.InCsumErrors' - incsumerrors_pcent: '@hotsos.core.plugins.kernel.net.SNMPUdp.InCsumErrorsPcentInDatagrams' - memusage_pages_inuse: '@hotsos.core.plugins.kernel.net.SockStat.GlobUdpSocksTotalMemPages' - memusage_pages_max: '@hotsos.core.plugins.kernel.net.SockStat.SysctlUdpMemMax' - memusage_pct: '@hotsos.core.plugins.kernel.net.SockStat.UDPMemUsagePct' + inerrors: '@kernel.net.snmp.udp.InErrors' + inerrors_pcent: '@kernel.net.snmp.udp.InErrorsPcentInDatagrams' + rcvbuferrors: '@kernel.net.snmp.udp.RcvbufErrors' + rcvbuferrors_pcent: '@kernel.net.snmp.udp.RcvbufErrorsPcentInDatagrams' + sndbuferrors: '@kernel.net.snmp.udp.SndbufErrors' + sndbuferrors_pcent: '@kernel.net.snmp.udp.SndbufErrorsPcentOutDatagrams' + incsumerrors: '@kernel.net.snmp.udp.InCsumErrors' + incsumerrors_pcent: '@kernel.net.snmp.udp.InCsumErrorsPcentInDatagrams' + memusage_pages_inuse: '@kernel.net.sockstat.GlobUdpSocksTotalMemPages' + memusage_pages_max: '@kernel.net.sockstat.SysctlUdpMemMax' + memusage_pct: '@kernel.net.sockstat.UDPMemUsagePct' checks: rcvbuferrors_high: or: diff --git a/hotsos/defs/scenarios/kubernetes/system_cpufreq_mode.yaml b/hotsos/defs/scenarios/kubernetes/system_cpufreq_mode.yaml index 5959e2d1b..7997cf4cd 100644 --- a/hotsos/defs/scenarios/kubernetes/system_cpufreq_mode.yaml +++ 
b/hotsos/defs/scenarios/kubernetes/system_cpufreq_mode.yaml @@ -9,7 +9,7 @@ vars: message_ondemand: >- You will also need to stop and disable the ondemand systemd service in order for changes to persist. - scaling_governor: '@hotsos.core.plugins.kernel.sysfs.CPU.cpufreq_scaling_governor_all' + scaling_governor: '@kernel.sysfs.cpu.cpufreq_scaling_governor_all' checks: cpufreq_governor_not_performance: # can we actually see the setting @@ -20,7 +20,7 @@ checks: - snap: kubelet # ignore if not running on metal - property: - path: hotsos.core.plugins.system.system.SystemBase.virtualisation_type + path: system.virtualisation_type ops: [[eq, null]] ondemand_installed_and_enabled: systemd: diff --git a/hotsos/defs/scenarios/lxd/bugs/lp1807628.yaml b/hotsos/defs/scenarios/lxd/bugs/lp1807628.yaml index fe59b293c..258159451 100644 --- a/hotsos/defs/scenarios/lxd/bugs/lp1807628.yaml +++ b/hotsos/defs/scenarios/lxd/bugs/lp1807628.yaml @@ -15,11 +15,11 @@ checks: max: 3.0.3-0ubuntu1~18.04.2 is_not_a_lxc_container: property: - path: hotsos.core.plugins.system.SystemBase.virtualisation_type + path: system.virtualisation_type ops: [[ne, 'lxc']] has_lxc_containers: property: - path: hotsos.core.plugins.lxd.LXD.instances + path: lxd.instances ops: [[length_hint], [gt, 0]] conclusions: lxcfs_segfault: diff --git a/hotsos/defs/scenarios/lxd/lxcfs_deadlock.yaml b/hotsos/defs/scenarios/lxd/lxcfs_deadlock.yaml index fc4f7cfa6..49af17142 100644 --- a/hotsos/defs/scenarios/lxd/lxcfs_deadlock.yaml +++ b/hotsos/defs/scenarios/lxd/lxcfs_deadlock.yaml @@ -1,11 +1,11 @@ checks: is_not_a_lxc_container: property: - path: hotsos.core.plugins.system.SystemBase.virtualisation_type + path: system.virtualisation_type ops: [[ne, 'lxc']] has_lxc_containers: property: - path: hotsos.core.plugins.lxd.LXD.instances + path: lxd.instances ops: [[length_hint], [gt, 0]] has_lxd_version_5_9: snap: diff --git a/hotsos/defs/scenarios/mysql/mysql.yaml b/hotsos/defs/scenarios/mysql/mysql.yaml index 
4e0ea32a7..327a7f652 100644 --- a/hotsos/defs/scenarios/mysql/mysql.yaml +++ b/hotsos/defs/scenarios/mysql/mysql.yaml @@ -1,4 +1,4 @@ # This file is used to define overrides applicable to contents of this # directory including subdirectories. requires: - property: hotsos.core.plugins.mysql.MySQLChecksBase.plugin_runnable + property: mysql.plugin_runnable diff --git a/hotsos/defs/scenarios/openstack/eol.yaml b/hotsos/defs/scenarios/openstack/eol.yaml index f0d7152a3..f7afc5019 100644 --- a/hotsos/defs/scenarios/openstack/eol.yaml +++ b/hotsos/defs/scenarios/openstack/eol.yaml @@ -1,7 +1,7 @@ checks: is_eol: property: - path: hotsos.core.plugins.openstack.OpenstackChecksBase.days_to_eol + path: openstack.days_to_eol ops: [[le, 0]] conclusions: is-eol: @@ -14,4 +14,4 @@ conclusions: limited support and is likely not receiving updates anymore. Please consider upgrading to a newer release. format-dict: - release: hotsos.core.plugins.openstack.OpenstackChecksBase.release_name + release: openstack.release_name diff --git a/hotsos/defs/scenarios/openstack/neutron/bugs/lp1907686.yaml b/hotsos/defs/scenarios/openstack/neutron/bugs/lp1907686.yaml index 9cd31a75c..0eaaf26db 100644 --- a/hotsos/defs/scenarios/openstack/neutron/bugs/lp1907686.yaml +++ b/hotsos/defs/scenarios/openstack/neutron/bugs/lp1907686.yaml @@ -1,6 +1,6 @@ checks: isolcpus_enabled: - property: hotsos.core.plugins.kernel.KernelBase.isolcpus_enabled + property: kernel.isolcpus_enabled has_1907686: input: path: 'var/log/neutron/neutron-openvswitch-agent.log' diff --git a/hotsos/defs/scenarios/openstack/neutron/neutron_ovs_cleanup.yaml b/hotsos/defs/scenarios/openstack/neutron/neutron_ovs_cleanup.yaml index 474442ffc..fa795043f 100644 --- a/hotsos/defs/scenarios/openstack/neutron/neutron_ovs_cleanup.yaml +++ b/hotsos/defs/scenarios/openstack/neutron/neutron_ovs_cleanup.yaml @@ -3,7 +3,7 @@ checks: systemd: neutron-ovs-cleanup: enabled ovs_cleanup_run_manually: - property: 
hotsos.core.plugins.openstack.neutron.ServiceChecks.ovs_cleanup_run_manually + property: neutron.service_checks.ovs_cleanup_run_manually conclusions: ovs-cleanup-run-manually: decision: diff --git a/hotsos/defs/scenarios/openstack/nova/cpu_pinning.yaml b/hotsos/defs/scenarios/openstack/nova/cpu_pinning.yaml index 05b717415..ebd0d2581 100644 --- a/hotsos/defs/scenarios/openstack/nova/cpu_pinning.yaml +++ b/hotsos/defs/scenarios/openstack/nova/cpu_pinning.yaml @@ -1,24 +1,24 @@ # All conclusions in this scenario share the same priority so that one or more # of them can be reached. vars: - isolcpus_enabled: '@hotsos.core.plugins.kernel.KernelBase.isolcpus_enabled' - cpuaffinity_enabled: '@hotsos.core.plugins.kernel.SystemdConfig.cpuaffinity_enabled' - openstack_release: '@hotsos.core.plugins.openstack.OpenstackChecksBase.release_name' - vcpu_pinset: '@hotsos.core.plugins.openstack.nova.CPUPinning.vcpu_pin_set' - cpu_dedicated_set: '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set' - cpu_dedicated_set_name: '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set_name' - pinning_from_multi_numa_nodes: '@hotsos.core.plugins.openstack.nova.CPUPinning.nova_pinning_from_multi_numa_nodes' + isolcpus_enabled: '@kernel.isolcpus_enabled' + cpuaffinity_enabled: '@kernel.systemdconfig.cpuaffinity_enabled' + openstack_release: '@openstack.release_name' + vcpu_pinset: '@openstack.nova.cpupinning.vcpu_pin_set' + cpu_dedicated_set: '@openstack.nova.cpupinning.cpu_dedicated_set' + cpu_dedicated_set_name: '@openstack.nova.cpupinning.cpu_dedicated_set_name' + pinning_from_multi_numa_nodes: '@openstack.nova.cpupinning.nova_pinning_from_multi_numa_nodes' cpu_dedicated_set_intersection_isolcpus: - '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set_intersection_isolcpus' + '@openstack.nova.cpupinning.cpu_dedicated_set_intersection_isolcpus' cpu_dedicated_set_intersection_cpuaffinity: - 
'@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set_intersection_cpuaffinity' + '@openstack.nova.cpupinning.cpu_dedicated_set_intersection_cpuaffinity' cpu_shared_set_intersection_isolcpus: - '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_shared_set_intersection_isolcpus' + '@openstack.nova.cpupinning.cpu_shared_set_intersection_isolcpus' cpu_shared_set_intersection_cpu_dedicated_set: - '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_shared_set_intersection_cpu_dedicated_set' - cpuaffinity_intersection_isolcpus: '@hotsos.core.plugins.openstack.nova.CPUPinning.cpuaffinity_intersection_isolcpus' - num_unpinned_cpus: '@hotsos.core.plugins.openstack.nova.CPUPinning.num_unpinned_cpus' - unpinned_cpus_pcent: '@hotsos.core.plugins.openstack.nova.CPUPinning.unpinned_cpus_pcent' + '@openstack.nova.cpupinning.cpu_shared_set_intersection_cpu_dedicated_set' + cpuaffinity_intersection_isolcpus: '@openstack.nova.cpupinning.cpuaffinity_intersection_isolcpus' + num_unpinned_cpus: '@openstack.nova.cpupinning.num_unpinned_cpus' + unpinned_cpus_pcent: '@openstack.nova.cpupinning.unpinned_cpus_pcent' checks: is_nova_compute_node: systemd: nova-compute @@ -106,7 +106,7 @@ conclusions: format-dict: pcent_unpinned: $unpinned_cpus_pcent nonisolated: $num_unpinned_cpus - total: hotsos.core.plugins.system.common.SystemBase.num_cpus + total: system.num_cpus nova-pinning-from-multi-numa-nodes: decision: - is_nova_compute_node diff --git a/hotsos/defs/scenarios/openstack/octavia/hm_port_health.yaml b/hotsos/defs/scenarios/openstack/octavia/hm_port_health.yaml index a18dd35b0..88ff25af5 100644 --- a/hotsos/defs/scenarios/openstack/octavia/hm_port_health.yaml +++ b/hotsos/defs/scenarios/openstack/octavia/hm_port_health.yaml @@ -1,13 +1,13 @@ checks: octavia_worker_installed: - property: hotsos.core.plugins.openstack.octavia.OctaviaBase.installed + property: openstack.octavia.installed or: systemd: octavia-worker pebble: octavia-worker hm_port_has_no_packet_drops_or_errors: 
- property: hotsos.core.plugins.openstack.octavia.OctaviaBase.hm_port_healthy + property: openstack.octavia.hm_port_healthy hm_port_address_check: - property: hotsos.core.plugins.openstack.octavia.OctaviaBase.hm_port_has_address + property: openstack.octavia.hm_port_has_address conclusions: no-addr-or-noexist: priority: 1 @@ -22,7 +22,7 @@ conclusions: access to the lb-management network and therefore will not be able to communicate with Amphora VMs - please investigate. format-dict: - name: hotsos.core.plugins.openstack.octavia.OCTAVIA_HM_PORT_NAME + name: openstack.octavia.OCTAVIA_HM_PORT_NAME packet-drops-errors: priority: 2 decision: @@ -34,4 +34,4 @@ conclusions: Octavia health manager port {name} has some packets drops or errors - please investigate. format-dict: - name: hotsos.core.plugins.openstack.octavia.OCTAVIA_HM_PORT_NAME + name: openstack.octavia.OCTAVIA_HM_PORT_NAME diff --git a/hotsos/defs/scenarios/openstack/openstack.yaml b/hotsos/defs/scenarios/openstack/openstack.yaml index 7791f0864..13341565e 100644 --- a/hotsos/defs/scenarios/openstack/openstack.yaml +++ b/hotsos/defs/scenarios/openstack/openstack.yaml @@ -1,4 +1,4 @@ # This file is used to define overrides applicable to contents of this # directory including subdirectories. 
requires: - property: hotsos.core.plugins.openstack.OpenstackChecksBase.plugin_runnable + property: openstack.plugin_runnable diff --git a/hotsos/defs/scenarios/openstack/openstack_apache2_certificates.yaml b/hotsos/defs/scenarios/openstack/openstack_apache2_certificates.yaml index 480b09b3a..0e75af471 100644 --- a/hotsos/defs/scenarios/openstack/openstack_apache2_certificates.yaml +++ b/hotsos/defs/scenarios/openstack/openstack_apache2_certificates.yaml @@ -1,9 +1,9 @@ checks: ssl_enabled: - property: hotsos.core.plugins.openstack.OpenstackBase.ssl_enabled + property: openstack.ssl_enabled apache2_certificate_expiring: property: - path: hotsos.core.plugins.openstack.OpenstackBase.apache2_certificates_expiring + path: openstack.apache2_certificates_expiring ops: [[ne, []]] conclusions: need-certificate-renewal: @@ -17,4 +17,4 @@ conclusions: {apache2-certificates-path} format-dict: apache2-certificates-path: '@checks.apache2_certificate_expiring.requires.value_actual:comma_join' - apache2-certificates-days-to-expire: 'hotsos.core.plugins.openstack.OpenstackBase.certificate_expire_days' + apache2-certificates-days-to-expire: 'openstack.certificate_expire_days' diff --git a/hotsos/defs/scenarios/openstack/openstack_charm_conflicts.yaml b/hotsos/defs/scenarios/openstack/openstack_charm_conflicts.yaml index 8e59a065b..9518d9168 100644 --- a/hotsos/defs/scenarios/openstack/openstack_charm_conflicts.yaml +++ b/hotsos/defs/scenarios/openstack/openstack_charm_conflicts.yaml @@ -1,5 +1,5 @@ vars: - local_charms: '@hotsos.core.plugins.juju.JujuChecksBase.charms' + local_charms: '@juju.charms' checks: neutron_conflicts: - varops: [[$local_charms], [contains, neutron-api]] diff --git a/hotsos/defs/scenarios/openstack/pkgs_from_mixed_releases_found.yaml b/hotsos/defs/scenarios/openstack/pkgs_from_mixed_releases_found.yaml index 26caadd0f..867b674ef 100644 --- a/hotsos/defs/scenarios/openstack/pkgs_from_mixed_releases_found.yaml +++ 
b/hotsos/defs/scenarios/openstack/pkgs_from_mixed_releases_found.yaml @@ -1,7 +1,7 @@ checks: has_mixed_pkg_releases: property: - path: hotsos.core.plugins.openstack.OpenstackChecksBase.installed_pkg_release_names + path: openstack.installed_pkg_release_names ops: [[length_hint], [gt, 1]] conclusions: mixed-pkg-releases: diff --git a/hotsos/defs/scenarios/openstack/system_cpufreq_mode.yaml b/hotsos/defs/scenarios/openstack/system_cpufreq_mode.yaml index e83c12db6..31115e41b 100644 --- a/hotsos/defs/scenarios/openstack/system_cpufreq_mode.yaml +++ b/hotsos/defs/scenarios/openstack/system_cpufreq_mode.yaml @@ -12,7 +12,7 @@ vars: msg_ondemand: >- You will also need to stop and disable the ondemand systemd service in order for changes to persist. - scaling_governor: '@hotsos.core.plugins.kernel.sysfs.CPU.cpufreq_scaling_governor_all' + scaling_governor: '@kernel.sysfs.cpu.cpufreq_scaling_governor_all' checks: cpufreq_governor_not_performance: # can we actually see the setting diff --git a/hotsos/defs/scenarios/openstack/systemd_masked_services.yaml b/hotsos/defs/scenarios/openstack/systemd_masked_services.yaml index 80267d7cf..5e55808a3 100644 --- a/hotsos/defs/scenarios/openstack/systemd_masked_services.yaml +++ b/hotsos/defs/scenarios/openstack/systemd_masked_services.yaml @@ -1,7 +1,7 @@ checks: has_unexpected_masked: property: - path: hotsos.core.plugins.openstack.OpenstackChecksBase.unexpected_masked_services + path: openstack.unexpected_masked_services ops: [[ne, []]] conclusions: has-unexpected-masked: diff --git a/hotsos/defs/scenarios/openvswitch/dpdk_config.yaml b/hotsos/defs/scenarios/openvswitch/dpdk_config.yaml index c95044c3a..a1e3fd3c6 100644 --- a/hotsos/defs/scenarios/openvswitch/dpdk_config.yaml +++ b/hotsos/defs/scenarios/openvswitch/dpdk_config.yaml @@ -1,15 +1,15 @@ vars: pmd_cpu_mask_key: 'pmd-cpu-mask' - pmd_cpu_mask: '@hotsos.core.plugins.openvswitch.OVSDPDK.pmd_cpu_mask' + pmd_cpu_mask: '@openvswitch.dpdk.pmd_cpu_mask' lcore_mask_key: 
'dpdk-lcore-mask' - lcore_mask: '@hotsos.core.plugins.openvswitch.OVSDPDK.dpdk_lcore_mask' - other_config: '@hotsos.core.plugins.openvswitch.OVSDB.other_config:Open_vSwitch' - cpu_dedicated_set_hex: '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set_hex' - cpu_shared_set_hex: '@hotsos.core.plugins.openstack.nova.CPUPinning.cpu_shared_set_hex' + lcore_mask: '@openvswitch.dpdk.dpdk_lcore_mask' + other_config: '@openvswitch.db.other_config:Open_vSwitch' + cpu_dedicated_set_hex: '@openstack.nova.cpupinning.cpu_dedicated_set_hex' + cpu_shared_set_hex: '@openstack.nova.cpupinning.cpu_shared_set_hex' checks: ovs_dpdk_enabled: # see https://docs.openvswitch.org/en/latest/intro/install/dpdk/#setup-ovs - property: hotsos.core.plugins.openvswitch.OVSDPDK.enabled + property: openvswitch.dpdk.enabled dpdk_installed: apt: [openvswitch-switch-dpdk, dpdk] pmd_mask_is_set: @@ -79,5 +79,5 @@ conclusions: Poll Mode Driver threads and the Nova instances. format-dict: pmd_cpu_mask: '$pmd_cpu_mask_key' - cpu_dedicated_set: 'hotsos.core.plugins.openstack.nova.CPUPinning.cpu_dedicated_set:int_ranges' - cpu_shared_set: 'hotsos.core.plugins.openstack.nova.CPUPinning.cpu_shared_set:int_ranges' + cpu_dedicated_set: 'openstack.nova.cpupinning.cpu_dedicated_set:int_ranges' + cpu_shared_set: 'openstack.nova.cpupinning.cpu_shared_set:int_ranges' diff --git a/hotsos/defs/scenarios/openvswitch/dpif_lost_packets.yaml b/hotsos/defs/scenarios/openvswitch/dpif_lost_packets.yaml index 134f1a830..88e9250ac 100644 --- a/hotsos/defs/scenarios/openvswitch/dpif_lost_packets.yaml +++ b/hotsos/defs/scenarios/openvswitch/dpif_lost_packets.yaml @@ -2,7 +2,7 @@ input: path: var/log/openvswitch/ovs-vswitchd.log vars: - num_lost_packets: '@hotsos.core.plugins.openvswitch.OVSDPLookups.lost' + num_lost_packets: '@openvswitch.dplookups.lost' lost_packets_msg_part1: >- This host is running Openvswitch and its datapath is reporting a non-zero amount diff --git 
a/hotsos/defs/scenarios/openvswitch/ovn/bfd_flapping.yaml b/hotsos/defs/scenarios/openvswitch/ovn/bfd_flapping.yaml index 557df1b0a..c42fec22a 100644 --- a/hotsos/defs/scenarios/openvswitch/ovn/bfd_flapping.yaml +++ b/hotsos/defs/scenarios/openvswitch/ovn/bfd_flapping.yaml @@ -1,5 +1,5 @@ vars: - bfd_transitions: '@hotsos.core.plugins.openvswitch.OVSBFD.max_transitions_last_24h_within_hour' + bfd_transitions: '@openvswitch.bfd.max_transitions_last_24h_within_hour' checks: vswitchd_to_ovn_controller_inactivity_timeouts: input: diff --git a/hotsos/defs/scenarios/openvswitch/ovn/ovn_central_certs_logs.yaml b/hotsos/defs/scenarios/openvswitch/ovn/ovn_central_certs_logs.yaml index 5c7af947e..912b4605e 100644 --- a/hotsos/defs/scenarios/openvswitch/ovn/ovn_central_certs_logs.yaml +++ b/hotsos/defs/scenarios/openvswitch/ovn/ovn_central_certs_logs.yaml @@ -1,6 +1,6 @@ vars: - host_cert_mtime: '@hotsos.core.host_helpers.filestat.FileFactory.mtime:etc/ovn/cert_host' - ovn_central_cert_mtime: '@hotsos.core.host_helpers.filestat.FileFactory.mtime:etc/ovn/ovn-central.crt' + host_cert_mtime: '@file.mtime:etc/ovn/cert_host' + ovn_central_cert_mtime: '@file.mtime:etc/ovn/ovn-central.crt' northd_start_time: '@hotsos.core.host_helpers.systemd.ServiceFactory.start_time_secs:ovn-northd' ovsdb_nb_start_time: '@hotsos.core.host_helpers.systemd.ServiceFactory.start_time_secs:ovn-ovsdb-server-nb' ovsdb_sb_start_time: '@hotsos.core.host_helpers.systemd.ServiceFactory.start_time_secs:ovn-ovsdb-server-sb' diff --git a/hotsos/defs/scenarios/openvswitch/ovn/ovn_certs_valid.yaml b/hotsos/defs/scenarios/openvswitch/ovn/ovn_certs_valid.yaml index c7de8f15f..d5fe4fb4d 100644 --- a/hotsos/defs/scenarios/openvswitch/ovn/ovn_certs_valid.yaml +++ b/hotsos/defs/scenarios/openvswitch/ovn/ovn_certs_valid.yaml @@ -1,12 +1,12 @@ vars: - ml2_mechanism_driver: '@hotsos.core.plugins.openstack.neutron.Config.mechanism_drivers:plugins/ml2/ml2_conf.ini' - data_root_is_sosreport: 
'@hotsos.core.plugins.sosreport.SOSReportChecksBase.data_root_is_sosreport' - ovn_cert_host_exists: '@hotsos.core.host_helpers.filestat.FileFactory.exists:etc/ovn/cert_host' - ovn_cert_host_days: '@hotsos.core.host_helpers.ssl.SSLCertificatesFactory.days_to_expire:etc/ovn/cert_host' + ml2_mechanism_driver: '@neutron.config.mechanism_drivers:plugins/ml2/ml2_conf.ini' + data_root_is_sosreport: '@sosreport.data_root_is_sosreport' + ovn_cert_host_exists: '@file.exists:etc/ovn/cert_host' + ovn_cert_host_days: '@sslcert.days_to_expire:etc/ovn/cert_host' neutron_ml2_cert_host_exists: - '@hotsos.core.host_helpers.filestat.FileFactory.exists:etc/neutron/plugins/ml2/cert_host' + '@file.exists:etc/neutron/plugins/ml2/cert_host' neutron_ml2_cert_host_days: - '@hotsos.core.host_helpers.ssl.SSLCertificatesFactory.days_to_expire:etc/neutron/plugins/ml2/cert_host' + '@sslcert.days_to_expire:etc/neutron/plugins/ml2/cert_host' checks: is_not_sosreport_data_root: varops: [[$data_root_is_sosreport], [ne, true]] diff --git a/hotsos/defs/scenarios/openvswitch/ovn/ovn_chassis_certs_logs.yaml b/hotsos/defs/scenarios/openvswitch/ovn/ovn_chassis_certs_logs.yaml index 6317be2d9..2ea19aa09 100644 --- a/hotsos/defs/scenarios/openvswitch/ovn/ovn_chassis_certs_logs.yaml +++ b/hotsos/defs/scenarios/openvswitch/ovn/ovn_chassis_certs_logs.yaml @@ -1,6 +1,6 @@ vars: - host_cert_mtime: '@hotsos.core.host_helpers.filestat.FileFactory.mtime:etc/ovn/cert_host' - ovn_chassis_cert_mtime: '@hotsos.core.host_helpers.filestat.FileFactory.mtime:etc/ovn/ovn-chassis.crt' + host_cert_mtime: '@file.mtime:etc/ovn/cert_host' + ovn_chassis_cert_mtime: '@file.mtime:etc/ovn/ovn-chassis.crt' ovn_controller_start_time: '@hotsos.core.host_helpers.systemd.ServiceFactory.start_time_secs:ovn-controller' cert_expired_expr: '([\d-]+)T([\d:]+)\.\d+Z\|\S+\|stream_ssl\|WARN\|SSL_accept: error:\S+:SSL routines:ssl3_read_bytes:sslv3 alert certificate expired' cert_invalid_expr: 
'([\d-]+)T([\d:]+)\.\d+Z\|\S+\|stream_ssl\|WARN\|SSL_accept: error:\S+:SSL routines:tls_process_client_certificate:certificate verify failed' diff --git a/hotsos/defs/scenarios/openvswitch/ovn/ovn_upgrades.yaml b/hotsos/defs/scenarios/openvswitch/ovn/ovn_upgrades.yaml index aa710fc9b..fa84e5253 100644 --- a/hotsos/defs/scenarios/openvswitch/ovn/ovn_upgrades.yaml +++ b/hotsos/defs/scenarios/openvswitch/ovn/ovn_upgrades.yaml @@ -1,6 +1,6 @@ vars: dbkey: 'ovn-match-northd-version' - external_ids: '@hotsos.core.plugins.openvswitch.OVSDB.external_ids:Open_vSwitch' + external_ids: '@openvswitch.db.external_ids:Open_vSwitch' message_boilerplate: >- The ovn-controller service on this node is reporting northd version mismatch errors. This happens when the version of OVN differs between diff --git a/hotsos/defs/scenarios/openvswitch/service_restarts.yaml b/hotsos/defs/scenarios/openvswitch/service_restarts.yaml index e5a8eebfe..3f49f8316 100644 --- a/hotsos/defs/scenarios/openvswitch/service_restarts.yaml +++ b/hotsos/defs/scenarios/openvswitch/service_restarts.yaml @@ -15,7 +15,7 @@ checks: apt: [openvswitch-switch-dpdk, dpdk] ovs_dpdk_enabled: # see https://docs.openvswitch.org/en/latest/intro/install/dpdk/#setup-ovs - property: hotsos.core.plugins.openvswitch.OVSDPDK.enabled + property: openvswitch.dpdk.enabled conclusions: ovs_frequent_restarts_dpdk: priority: 2 diff --git a/hotsos/defs/scenarios/pacemaker/bugs/lp1874719.yaml b/hotsos/defs/scenarios/pacemaker/bugs/lp1874719.yaml index 9cd323a03..6b7e7a3f8 100644 --- a/hotsos/defs/scenarios/pacemaker/bugs/lp1874719.yaml +++ b/hotsos/defs/scenarios/pacemaker/bugs/lp1874719.yaml @@ -1,7 +1,7 @@ checks: node1-found: property: - path: hotsos.core.plugins.pacemaker.PacemakerBase.offline_nodes + path: pacemaker.offline_nodes ops: [[contains, node1]] conclusions: node1-found-needs-removal: diff --git a/hotsos/defs/scenarios/rabbitmq/cluster_config.yaml b/hotsos/defs/scenarios/rabbitmq/cluster_config.yaml index 
eed53645f..3edb274f7 100644 --- a/hotsos/defs/scenarios/rabbitmq/cluster_config.yaml +++ b/hotsos/defs/scenarios/rabbitmq/cluster_config.yaml @@ -1,7 +1,7 @@ checks: partition_handling_is_ignore: property: - path: hotsos.core.plugins.rabbitmq.RabbitMQReport.partition_handling + path: rabbitmq.partition_handling ops: [[eq, ignore]] conclusions: partition-handling-is-ignore: diff --git a/hotsos/defs/scenarios/rabbitmq/cluster_resources.yaml b/hotsos/defs/scenarios/rabbitmq/cluster_resources.yaml index 46222a570..b92ec2690 100644 --- a/hotsos/defs/scenarios/rabbitmq/cluster_resources.yaml +++ b/hotsos/defs/scenarios/rabbitmq/cluster_resources.yaml @@ -1,7 +1,7 @@ checks: cluster_vhosts_unbalanced: property: - path: hotsos.core.plugins.rabbitmq.RabbitMQReport.skewed_nodes + path: rabbitmq.skewed_nodes ops: [[length_hint], [gt, 0]] conclusions: cluster-vhosts-unbalanced: diff --git a/hotsos/defs/scenarios/sosreport/plugin_timeouts.yaml b/hotsos/defs/scenarios/sosreport/plugin_timeouts.yaml index 38d453d7a..acb2c00ac 100644 --- a/hotsos/defs/scenarios/sosreport/plugin_timeouts.yaml +++ b/hotsos/defs/scenarios/sosreport/plugin_timeouts.yaml @@ -1,7 +1,7 @@ checks: has_timed_out_plugins: property: - path: hotsos.core.plugins.sosreport.SOSReportChecksBase.timed_out_plugins + path: sosreport.timed_out_plugins ops: [[length_hint], [gt, 0]] conclusions: has-timed-out-plugins: diff --git a/hotsos/defs/scenarios/storage/bcache/bcache.yaml b/hotsos/defs/scenarios/storage/bcache/bcache.yaml index 380739c33..98133e264 100644 --- a/hotsos/defs/scenarios/storage/bcache/bcache.yaml +++ b/hotsos/defs/scenarios/storage/bcache/bcache.yaml @@ -1,5 +1,5 @@ requires: # don't run these checks if we are inside a lxc container property: - path: hotsos.core.plugins.system.system.SystemBase.virtualisation_type + path: system.virtualisation_type ops: [[ne, lxc]] diff --git a/hotsos/defs/scenarios/storage/bcache/bdev.yaml b/hotsos/defs/scenarios/storage/bcache/bdev.yaml index 9defa773b..e5956459e 
100644 --- a/hotsos/defs/scenarios/storage/bcache/bdev.yaml +++ b/hotsos/defs/scenarios/storage/bcache/bdev.yaml @@ -1,10 +1,10 @@ vars: - sequential_cutoff: '@hotsos.core.plugins.storage.bcache.BDevsInfo.sequential_cutoff' - cache_mode: '@hotsos.core.plugins.storage.bcache.BDevsInfo.cache_mode' - writeback_percent: '@hotsos.core.plugins.storage.bcache.BDevsInfo.writeback_percent' + sequential_cutoff: '@bcache.bdevsinfo.sequential_cutoff' + cache_mode: '@bcache.bdevsinfo.cache_mode' + writeback_percent: '@bcache.bdevsinfo.writeback_percent' checks: bcache_enabled: - property: hotsos.core.plugins.storage.bcache.BcacheBase.bcache_enabled + property: bcache.bcache_enabled has_invalid_bdev_cutoff: varops: [[$sequential_cutoff], [getitem, 0], [ne, '0.0k']] has_invalid_bdev_cache_mode: diff --git a/hotsos/defs/scenarios/storage/bcache/cacheset.yaml b/hotsos/defs/scenarios/storage/bcache/cacheset.yaml index 3d63ce101..51fdcead1 100644 --- a/hotsos/defs/scenarios/storage/bcache/cacheset.yaml +++ b/hotsos/defs/scenarios/storage/bcache/cacheset.yaml @@ -1,10 +1,10 @@ vars: - congested_read_threshold_us: '@hotsos.core.plugins.storage.bcache.CachesetsInfo.congested_read_threshold_us' - congested_write_threshold_us: '@hotsos.core.plugins.storage.bcache.CachesetsInfo.congested_write_threshold_us' - cache_available_percent: '@hotsos.core.plugins.storage.bcache.CachesetsInfo.cache_available_percent' + congested_read_threshold_us: '@bcache.cachesetsinfo.congested_read_threshold_us' + congested_write_threshold_us: '@bcache.cachesetsinfo.congested_write_threshold_us' + cache_available_percent: '@bcache.cachesetsinfo.cache_available_percent' checks: bcache_enabled: - property: hotsos.core.plugins.storage.bcache.BcacheBase.bcache_enabled + property: bcache.bcache_enabled has_invalid_cset_congested_read_threshold_us: varops: [[$congested_read_threshold_us], [getitem, 0], [ne, 0]] has_invalid_cset_congested_write_threshold_us: diff --git 
a/hotsos/defs/scenarios/storage/ceph/ceph-mon/auth_insecure_global_id_reclaim_allowed.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/auth_insecure_global_id_reclaim_allowed.yaml index ce775d965..309496e25 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/auth_insecure_global_id_reclaim_allowed.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/auth_insecure_global_id_reclaim_allowed.yaml @@ -5,7 +5,7 @@ checks: expr: '.+"message": "mon is allowing insecure global_id reclaim"' health_warning: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[eq, HEALTH_WARN]] conclusions: insecure-auth-allowed: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/autoscaler_bug.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/autoscaler_bug.yaml index ad6ca25b5..fafa3a3b2 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/autoscaler_bug.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/autoscaler_bug.yaml @@ -1,5 +1,5 @@ vars: - autoscaler_enabled_pools: '@hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_enabled_pools' + autoscaler_enabled_pools: '@ceph.crushmap.autoscaler_enabled_pools' msg_main: >- This Ceph cluster is vulnerable to a bug in which OSDs can consume considerable amounts of memory and eventually be OOM killed due to diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_size.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_size.yaml index 56fb0bd91..ea57e4129 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_size.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_size.yaml @@ -1,7 +1,7 @@ checks: bluefs_osds_have_oversize_metadata: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.bluefs_oversized_metadata_osds + path: ceph.cluster.bluefs_oversized_metadata_osds ops: [[length_hint], [gt, 0]] conclusions: bluefs-osds-have-oversize-metadata: @@ -16,4 +16,4 @@ conclusions: compact the metadata, use 
'ceph-bluestore-tool' which is available since 14.2.0. format-dict: bad_meta_osds: '@checks.bluefs_osds_have_oversize_metadata.requires.value_actual:comma_join' - limit_percent: hotsos.core.plugins.storage.ceph.CephCluster.OSD_META_LIMIT_PERCENT + limit_percent: ceph.cluster.OSD_META_LIMIT_PERCENT diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_spillover.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_spillover.yaml index 67e31edfb..50533f783 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_spillover.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/bluefs_spillover.yaml @@ -5,7 +5,7 @@ checks: expr: '.+experiencing BlueFS spillover' health_warning: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[eq, HEALTH_WARN]] conclusions: bluefs-spillover: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_address_overlap.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_address_overlap.yaml index 5d734f8c6..7ce381090 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_address_overlap.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_address_overlap.yaml @@ -1,6 +1,6 @@ vars: - cluster_network: '@hotsos.core.plugins.storage.ceph.CephConfig.cluster_network_set' - public_network: '@hotsos.core.plugins.storage.ceph.CephConfig.public_network_set' + cluster_network: '@ceph.config.cluster_network_set' + public_network: '@ceph.config.public_network_set' checks: network_configs_overlap: # The following logic passes if one or both of network sets P and C diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_cluster_health.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_cluster_health.yaml index 2fe86c23a..d1e4b63b2 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_cluster_health.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_cluster_health.yaml @@ -1,11 +1,11 @@ checks: 
cluster_health_available: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[ne, null]] cluster_unhealthy: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[ne, HEALTH_OK]] conclusions: cluster-health-not-ok: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_versions_mismatch.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_versions_mismatch.yaml index 80e456317..dea7c401c 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_versions_mismatch.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ceph_versions_mismatch.yaml @@ -1,8 +1,8 @@ checks: cluster_daemon_versions_aligned: - property: hotsos.core.plugins.storage.ceph.CephCluster.ceph_versions_aligned + property: ceph.cluster.ceph_versions_aligned mon_versions_aligned: - property: hotsos.core.plugins.storage.ceph.CephCluster.mon_versions_aligned_with_cluster + property: ceph.cluster.mon_versions_aligned_with_cluster conclusions: all-daemon-versions-not-aligned: priority: 1 diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/crushmap_bucket_checks.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/crushmap_bucket_checks.yaml index d95077188..d3156dfea 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/crushmap_bucket_checks.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/crushmap_bucket_checks.yaml @@ -1,11 +1,11 @@ checks: crushmap_has_mixed_type_buckets: property: - path: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_mixed_buckets + path: ceph.crushmap.crushmap_mixed_buckets ops: [[length_hint], [gt, 0]] crushmap_has_unequal_buckets: property: - path: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_equal_buckets + path: ceph.crushmap.crushmap_equal_buckets ops: [[length_hint], [gt, 0]] conclusions: crushmap-mixed-buckets: @@ -16,7 +16,7 @@ conclusions: Mixed crush bucket types identified in buckets '{buckets}'. 
This can cause data distribution to become skewed - please check crush map. format-dict: - buckets: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_mixed_buckets_str + buckets: ceph.crushmap.crushmap_mixed_buckets_str crushmap-unbalanced-buckets: decision: crushmap_has_unequal_buckets raises: @@ -28,4 +28,4 @@ conclusions: Transient issues such as "out" OSDs, or cluster expansion/maintenance can trigger this warning. Affected CRUSH tree(s) and bucket types are {affected}. format-dict: - affected: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_equal_buckets_pretty + affected: ceph.crushmap.crushmap_equal_buckets_pretty diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/empty_clog.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/empty_clog.yaml index ddd1ccb08..491f5db72 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/empty_clog.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/empty_clog.yaml @@ -1,6 +1,6 @@ vars: - clog_size: '@hotsos.core.host_helpers.filestat.FileFactory.size:var/log/ceph/ceph.log' - mc_clog_size: '@hotsos.core.host_helpers.filestat.FileFactory.size:var/snap/microceph/common/logs/ceph.log' + clog_size: '@file.size:var/log/ceph/ceph.log' + mc_clog_size: '@file.size:var/snap/microceph/common/logs/ceph.log' checks: empty_clog_size: or: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/eol.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/eol.yaml index 6dbad7e44..e12cadc1b 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/eol.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/eol.yaml @@ -1,7 +1,7 @@ checks: is_eol: property: - path: hotsos.core.plugins.storage.ceph.CephChecksBase.days_to_eol + path: ceph.days_to_eol ops: [[le, 0]] conclusions: is-eol: @@ -14,4 +14,4 @@ conclusions: has limited support and is likely not receiving updates anymore. Please consider upgrading to a newer release. 
format-dict: - release: hotsos.core.plugins.storage.ceph.CephChecksBase.release_name + release: ceph.release_name diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/laggy_pgs.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/laggy_pgs.yaml index da5ec88ca..89f3b270a 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/laggy_pgs.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/laggy_pgs.yaml @@ -1,7 +1,7 @@ checks: cluster_has_laggy_pgs: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.laggy_pgs + path: ceph.cluster.laggy_pgs ops: [[length_hint], [gt, 0]] conclusions: cluster-has-laggy-pgs: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/large_omap_objects.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/large_omap_objects.yaml index 870b6af1a..d6bf26981 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/large_omap_objects.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/large_omap_objects.yaml @@ -1,7 +1,7 @@ checks: cluster_has_pgs_with_large_omap_objects: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.large_omap_pgs + path: ceph.cluster.large_omap_pgs ops: [[length_hint], [gt, 0]] conclusions: large-omap-pgs: @@ -19,4 +19,4 @@ conclusions: If the large OMAP objects are reported from a pool used by OpenStack Gnocchi, it may need tuning: https://portal.support.canonical.com/ua/s/article/Gnocchi-causing-large-OMAP-objects-in-a-Ceph-cluster format-dict: - large_omap_pgs: hotsos.core.plugins.storage.ceph.CephCluster.large_omap_pgs_str + large_omap_pgs: ceph.cluster.large_omap_pgs_str diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_db_too_big.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_db_too_big.yaml index 8f6897493..1abf37bc4 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_db_too_big.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_db_too_big.yaml @@ -5,7 +5,7 @@ checks: expr: '.*mon (.+) is using a lot of disk space.*' health_warning: property: - 
path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[ne, HEALTH_OK]] conclusions: db_too_large: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_elections_flapping.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_elections_flapping.yaml index 26c775f6b..5e0f22aae 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_elections_flapping.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/mon_elections_flapping.yaml @@ -11,7 +11,7 @@ checks: search-result-age-hours: 48 min-hours-since-last-boot: 1 ceph_interfaces_have_errors: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.has_interface_errors + property: ceph.has_interface_errors conclusions: cause-unknown: priority: 1 @@ -35,5 +35,5 @@ conclusions: period and the network interface(s) {interfaces} used by the ceph-mon are showing errors - please investigate. format-dict: - interfaces: hotsos.core.plugins.storage.ceph.CephChecksBase.bind_interface_names + interfaces: ceph.bind_interface_names count: '@checks.ceph_log_has_election_calls.search.num_results' diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_flapping.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_flapping.yaml index 2c909b03b..1a1177e7b 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_flapping.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_flapping.yaml @@ -6,7 +6,7 @@ checks: path: ['var/log/ceph/ceph*.log', 'var/snap/microceph/common/logs/ceph*.log'] expr: '([\d-])+[T ][\d:]+\S+ .+ wrongly marked me down at .+' ceph_interfaces_have_errors: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.has_interface_errors + property: ceph.has_interface_errors conclusions: cause-unknown: priority: 1 @@ -36,4 +36,4 @@ conclusions: interface(s) ({interfaces}) used by the Ceph are showing errors - please investigate. 
format-dict: - interfaces: hotsos.core.plugins.storage.ceph.CephChecksBase.bind_interface_names + interfaces: ceph.bind_interface_names diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_maps_backlog_too_large.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_maps_backlog_too_large.yaml index e8a85f8dd..2815082c8 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_maps_backlog_too_large.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_maps_backlog_too_large.yaml @@ -6,7 +6,7 @@ checks: # increasing which can result in more disk utilization, possibly slower # mons, etc. See https://docs.ceph.com/en/latest/dev/mon-osdmap-prune/. property: - path: hotsos.core.plugins.storage.ceph.CephCluster.osdmaps_count + path: ceph.cluster.osdmaps_count # mon_min_osdmap_epochs default ops: [[gt, 500]] conclusions: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_messenger_v2_protocol.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_messenger_v2_protocol.yaml index 5b8cdecd9..0528b17a3 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_messenger_v2_protocol.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_messenger_v2_protocol.yaml @@ -2,11 +2,11 @@ checks: ceph_release_gt_mimic: # v2 only available for >= Nautilus property: - path: hotsos.core.plugins.storage.ceph.CephChecksBase.release_name + path: ceph.release_name ops: [[gt, mimic]] cluster_has_v1_only_osds: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.cluster_osds_without_v2_messenger_protocol + path: ceph.cluster.cluster_osds_without_v2_messenger_protocol ops: [[length_hint], [gt, 0]] conclusions: some-osds-not-using-v2: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_slow_ops.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_slow_ops.yaml index 729e04d78..e84f9f3a7 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_slow_ops.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_slow_ops.yaml @@ -12,7 +12,7 
@@ checks: search-period-hours: 1 search-result-age-hours: 24 ceph_interfaces_have_errors: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.has_interface_errors + property: ceph.has_interface_errors conclusions: cause-unknown: priority: 1 @@ -38,4 +38,4 @@ conclusions: interface(s) ({interfaces}) used by the Ceph are showing errors - please investigate. format-dict: - interfaces: hotsos.core.plugins.storage.ceph.CephChecksBase.bind_interface_names + interfaces: ceph.bind_interface_names diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_unusual_raw.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_unusual_raw.yaml index a6c00949d..596d11086 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_unusual_raw.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/osd_unusual_raw.yaml @@ -1,7 +1,7 @@ checks: osds_have_unusual_raw_usage: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.osd_raw_usage_higher_than_data + path: ceph.cluster.osd_raw_usage_higher_than_data ops: [[length_hint], [gt, 0]] conclusions: osds-have-unusual-raw-usage: @@ -16,4 +16,4 @@ conclusions: full or misbehave, please restart them and possibly file a bug in Ceph tracker. 
format-dict: bad_osds: '@checks.osds_have_unusual_raw_usage.requires.value_actual:comma_join' - limit: hotsos.core.plugins.storage.ceph.CephCluster.OSD_DISCREPANCY_ALLOWED + limit: ceph.cluster.OSD_DISCREPANCY_ALLOWED diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_imbalance.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_imbalance.yaml index a9c492108..845113dec 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_imbalance.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_imbalance.yaml @@ -1,18 +1,18 @@ checks: cluster_has_osds_with_pgs_above_max: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.osds_pgs_above_max + path: ceph.cluster.osds_pgs_above_max ops: [[length_hint], [gt, 0]] cluster_has_osds_with_suboptimal_pgs: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.osds_pgs_suboptimal + path: ceph.cluster.osds_pgs_suboptimal ops: [[length_hint], [gt, 0]] cluster_has_non_empty_pools: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.cluster_has_non_empty_pools + path: ceph.cluster.cluster_has_non_empty_pools autoscaler_disabled_for_any_pool: property: - path: hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_disabled_pools + path: ceph.crushmap.autoscaler_disabled_pools ops: [[length_hint]] conclusions: cluster-osds-with-pgs-above-max: @@ -24,7 +24,7 @@ conclusions: limit at which point they will stop creating pgs and fail - please investigate. format-dict: - limit: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_MAX_LIMIT + limit: ceph.cluster.OSD_PG_MAX_LIMIT cluster-osds-with-suboptimal-pgs: decision: - cluster_has_osds_with_suboptimal_pgs @@ -37,5 +37,5 @@ conclusions: of {min}-{max} pgs. This could indicate poor data distribution across the cluster and result in performance degradation. 
format-dict: - min: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_OPTIMAL_NUM_MIN - max: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_OPTIMAL_NUM_MAX + min: ceph.cluster.OSD_PG_OPTIMAL_NUM_MIN + max: ceph.cluster.OSD_PG_OPTIMAL_NUM_MAX diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_overdose.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_overdose.yaml index de342f417..3a7d9ce8d 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_overdose.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_overdose.yaml @@ -5,7 +5,7 @@ checks: expr: '.+ PGs pending on creation' health_warning: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[ne, HEALTH_OK]] conclusions: pending_creating_pgs: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/required_osd_release_mismatch.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/required_osd_release_mismatch.yaml index 7298bdb5e..5c89cc63a 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/required_osd_release_mismatch.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/required_osd_release_mismatch.yaml @@ -1,10 +1,10 @@ checks: has_required_osd_release_cluster_config: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.require_osd_release + path: ceph.cluster.require_osd_release ops: [[ne, null]] osd_versions_match_osd_required: - property: hotsos.core.plugins.storage.ceph.CephCluster.osd_daemon_release_names_match_required + property: ceph.cluster.osd_daemon_release_names_match_required conclusions: osd-versions-mismatch: decision: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/rgw_frontend.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/rgw_frontend.yaml index cf117d731..09ef28ded 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/rgw_frontend.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/rgw_frontend.yaml @@ -13,7 +13,7 @@ checks: ops: [[contains, civetweb]] 
is_rgw_using_civetweb: property: - path: hotsos.core.plugins.storage.ceph.CephCrushMap.is_rgw_using_civetweb + path: ceph.crushmap.is_rgw_using_civetweb conclusions: rgw_outdated_frontend: decision: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ssds_using_bcache.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ssds_using_bcache.yaml index a7e608d1c..3881f43d9 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/ssds_using_bcache.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/ssds_using_bcache.yaml @@ -1,7 +1,7 @@ checks: ssds_using_bcache: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.ssds_using_bcache + path: ceph.cluster.ssds_using_bcache ops: [[length_hint], [gt, 0]] conclusions: ssd_osds_using_bcache: @@ -17,4 +17,4 @@ conclusions: the OSDs directly instead. Please compare the IOPs of the SSD (OSDs) vs. the bcache device (SSD/NVMe) to ascertain. format-dict: - osds: hotsos.core.plugins.storage.ceph.CephCluster.ssds_using_bcache + osds: ceph.cluster.ssds_using_bcache diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-mon/unresponsive_mon_mgr.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-mon/unresponsive_mon_mgr.yaml index 8844c7cfb..6fd19c344 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-mon/unresponsive_mon_mgr.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-mon/unresponsive_mon_mgr.yaml @@ -2,26 +2,26 @@ checks: # In older sosreports, the plugin is called 'ceph' (= mon + mgr + osd) and # in the newer ones it's called 'ceph_mon'. 
is_sosreport: - property: hotsos.core.plugins.sosreport.SOSReportChecksBase.plugin_runnable + property: sosreport.plugin_runnable sosreport_hung_ceph_mon_old: property: - path: hotsos.core.plugins.sosreport.SOSReportChecksBase.timed_out_plugins + path: sosreport.timed_out_plugins ops: [[contains, ceph]] sosreport_hung_ceph_mon_new: property: - path: hotsos.core.plugins.sosreport.SOSReportChecksBase.timed_out_plugins + path: sosreport.timed_out_plugins ops: [[contains, ceph_mon]] sosreport_hung_ceph_mgr: property: - path: hotsos.core.plugins.sosreport.SOSReportChecksBase.timed_out_plugins + path: sosreport.timed_out_plugins ops: [[contains, ceph_mgr]] ceph_osd_df_tree: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.osd_df_tree + path: ceph.cluster.osd_df_tree ops: [[eq, null]] ceph_pg_dump: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.pg_dump + path: ceph.cluster.pg_dump ops: [[eq, null]] conclusions: ceph_mon_hung: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1936136.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1936136.yaml index 3ad57b665..28f48aacf 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1936136.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1936136.yaml @@ -1,5 +1,5 @@ vars: - cache_available_percent: '@hotsos.core.plugins.storage.bcache.CachesetsInfo.cache_available_percent' + cache_available_percent: '@bcache.cachesetsinfo.cache_available_percent' checks: node_is_ceph_osd_and_has_version: # Get version of osd based on package installed. 
This is prone to @@ -16,10 +16,10 @@ checks: - min: 17.0.0 max: 17.0.0 node_has_osds_using_bcache: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.local_osds_use_bcache + property: ceph.local_osds_use_bcache kernel_version_check: property: - path: hotsos.core.plugins.kernel.KernelBase.version + path: kernel.version ops: [[lt, '5.4']] bluefs_buffered_io_enabled: config: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1959649.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1959649.yaml index 1d09dec80..853e1b09d 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1959649.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1959649.yaml @@ -1,8 +1,8 @@ vars: bluestore_volume_selection_policy: - '@hotsos.core.plugins.storage.ceph.CephDaemonAllOSDsFactory.bluestore_volume_selection_policy:CephDaemonConfigShow' + '@ceph.daemon.all-osds.bluestore_volume_selection_policy:CephDaemonConfigShow' bluestore_cache_onode: - '@hotsos.core.plugins.storage.ceph.CephDaemonAllOSDsFactory.bluestore_cache_onode:CephDaemonDumpMemPools' + '@ceph.daemon.all-osds.bluestore_cache_onode:CephDaemonDumpMemPools' checks: has_1959649: - apt: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1996010.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1996010.yaml index f143f0d58..48956ea3f 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1996010.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp1996010.yaml @@ -1,8 +1,8 @@ vars: bluestore_volume_selection_policy: - '@hotsos.core.plugins.storage.ceph.CephDaemonAllOSDsFactory.bluestore_volume_selection_policy:CephDaemonConfigShow' + '@ceph.daemon.all-osds.bluestore_volume_selection_policy:CephDaemonConfigShow' bluestore_cache_onode: - '@hotsos.core.plugins.storage.ceph.CephDaemonAllOSDsFactory.bluestore_cache_onode:CephDaemonDumpMemPools' + '@ceph.daemon.all-osds.bluestore_cache_onode:CephDaemonDumpMemPools' checks: has_1996010_osd_log: # NOTE: 
this needs quite a high debug level to appear - debug_bluestore=30/30 diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp2016845.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp2016845.yaml index 4bb3cf2c5..066a8c311 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp2016845.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/bugs/lp2016845.yaml @@ -7,7 +7,7 @@ checks: # packages have this issue in the past or future. - min: 12.2.0 linked_with_tcmalloc: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.linked_with_tcmalloc + property: ceph.linked_with_tcmalloc conclusions: node_affected_by_bug_2016845: decision: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/filestore_to_bluestore_upgrade.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/filestore_to_bluestore_upgrade.yaml index d818a33ec..05f1eebe2 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/filestore_to_bluestore_upgrade.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/filestore_to_bluestore_upgrade.yaml @@ -2,7 +2,7 @@ # This can happen e.g. after an upgrade from Filestore to Bluestore. 
checks: bluestore_enabled: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.bluestore_enabled + property: ceph.bluestore_enabled ceph_config_has_journal: config: handler: hotsos.core.plugins.storage.ceph.CephConfig diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/juju_ceph_no_bcache_tuning.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/juju_ceph_no_bcache_tuning.yaml index 45adce302..4c835002a 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/juju_ceph_no_bcache_tuning.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/juju_ceph_no_bcache_tuning.yaml @@ -1,14 +1,14 @@ checks: juju_ceph_osd_charm_enabled: property: - path: hotsos.core.plugins.juju.resources.JujuBase.charm_names + path: juju.base.charm_names ops: [[contains, ceph-osd]] juju_bcache_tuning_charm_enabled: property: - path: hotsos.core.plugins.juju.resources.JujuBase.charm_names + path: juju.base.charm_names ops: [[contains, bcache-tuning]] local_osds_using_bcache: - property: hotsos.core.plugins.storage.ceph.CephChecksBase.local_osds_use_bcache + property: ceph.local_osds_use_bcache conclusions: charmed-ceph-osd-no-bcache-tuning: decision: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/pg_overdose.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/pg_overdose.yaml index 64772da19..219577091 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/pg_overdose.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/pg_overdose.yaml @@ -1,7 +1,7 @@ checks: health_warning: property: - path: hotsos.core.plugins.storage.ceph.CephCluster.health_status + path: ceph.cluster.health_status ops: [[ne, HEALTH_OK]] ceph_osd_withhold_creation: input: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/ssd_osds_no_discard.yaml.disabled b/hotsos/defs/scenarios/storage/ceph/ceph-osd/ssd_osds_no_discard.yaml.disabled index 2344628ee..952ffc0ff 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/ssd_osds_no_discard.yaml.disabled +++ 
b/hotsos/defs/scenarios/storage/ceph/ceph-osd/ssd_osds_no_discard.yaml.disabled @@ -1,7 +1,7 @@ checks: ssd_ceph_osds_exist: property: - path: hotsos.core.plugins.storage.ceph.CephChecksBase.local_osds_devtypes + path: ceph.local_osds_devtypes ops: [[contains, ssd]] ceph_discard_not_enabled: config: diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-osd/system_cpufreq_mode.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-osd/system_cpufreq_mode.yaml index 7a51babb1..a8e468f27 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-osd/system_cpufreq_mode.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-osd/system_cpufreq_mode.yaml @@ -9,7 +9,7 @@ vars: message_ondemand: >- You will also need to stop and disable the ondemand systemd service in order for changes to persist. - scaling_governor: '@hotsos.core.plugins.kernel.sysfs.CPU.cpufreq_scaling_governor_all' + scaling_governor: '@kernel.sysfs.cpu.cpufreq_scaling_governor_all' checks: cpufreq_governor_not_performance: # can we actually see the setting diff --git a/hotsos/defs/scenarios/storage/ceph/ceph-rgw/bugs/lp1974138.yaml b/hotsos/defs/scenarios/storage/ceph/ceph-rgw/bugs/lp1974138.yaml index 575a3655e..ecaa81036 100644 --- a/hotsos/defs/scenarios/storage/ceph/ceph-rgw/bugs/lp1974138.yaml +++ b/hotsos/defs/scenarios/storage/ceph/ceph-rgw/bugs/lp1974138.yaml @@ -4,9 +4,9 @@ checks: librgw2: - min: 14.2.0 ssl_enabled: - property: hotsos.core.plugins.openstack.OpenstackBase.ssl_enabled + property: openstack.ssl_enabled apache2_allow_encoded_slashes_on: - property: hotsos.core.plugins.openstack.OpenstackBase.apache2_allow_encoded_slashes_on + property: openstack.apache2_allow_encoded_slashes_on conclusions: lp1974138: decision: diff --git a/hotsos/defs/scenarios/storage/ceph/common/ceph_charm_conflicts.yaml b/hotsos/defs/scenarios/storage/ceph/common/ceph_charm_conflicts.yaml index d20050ad2..b0ed5974d 100644 --- a/hotsos/defs/scenarios/storage/ceph/common/ceph_charm_conflicts.yaml +++ 
b/hotsos/defs/scenarios/storage/ceph/common/ceph_charm_conflicts.yaml @@ -1,5 +1,5 @@ vars: - local_charms: '@hotsos.core.plugins.juju.JujuChecksBase.charms' + local_charms: '@juju.charms' checks: ceph_osd_has_conflicts: varops: [[$local_charms], [contains, ceph-osd]] diff --git a/hotsos/defs/scenarios/storage/storage.yaml b/hotsos/defs/scenarios/storage/storage.yaml index dd8abac8e..e4c47d1cf 100644 --- a/hotsos/defs/scenarios/storage/storage.yaml +++ b/hotsos/defs/scenarios/storage/storage.yaml @@ -2,5 +2,5 @@ # directory including subdirectories. requires: or: - - property: hotsos.core.plugins.storage.ceph.CephChecksBase.plugin_runnable + - property: ceph.plugin_runnable - property: hotsos.core.plugins.storage.bcache.BcacheChecksBase.plugin_runnable diff --git a/hotsos/defs/scenarios/system/sssd-ad-tokengroups.yaml b/hotsos/defs/scenarios/system/sssd-ad-tokengroups.yaml index 415e3174f..dba7c37fd 100644 --- a/hotsos/defs/scenarios/system/sssd-ad-tokengroups.yaml +++ b/hotsos/defs/scenarios/system/sssd-ad-tokengroups.yaml @@ -1,5 +1,5 @@ vars: - ad_domains_with_tokengroups_enabled: '@hotsos.core.plugins.system.system.SSSD.tokengroups_enabled_domains' + ad_domains_with_tokengroups_enabled: '@sssd.tokengroups_enabled_domains' checks: any_tokengroups_enabled_domains: diff --git a/hotsos/defs/scenarios/system/unattended_upgrades.yaml b/hotsos/defs/scenarios/system/unattended_upgrades.yaml index ea2e5bf78..c9cee0361 100644 --- a/hotsos/defs/scenarios/system/unattended_upgrades.yaml +++ b/hotsos/defs/scenarios/system/unattended_upgrades.yaml @@ -1,6 +1,6 @@ checks: is_enabled: - property: hotsos.core.plugins.system.system.SystemBase.unattended_upgrades_enabled + property: system.unattended_upgrades_enabled conclusions: unattended-upgrades-enabled: decision: is_enabled