Reorganise structure of ceph plugin code (#946)
Creates the following structure:

  ceph/{common,daemon,cluster} where:

  common: code that applies to all/any
  cluster: cluster checks and helpers
  daemon: daemon checks and helpers
dosaboy authored Jul 16, 2024
1 parent 55b8f7f commit e3cfa1e
Showing 22 changed files with 78 additions and 78 deletions.
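For orientation, a minimal sketch of how import paths move under the new layout. This is illustrative only and based solely on the imports changed in the diffs below:

# before: flat modules
from hotsos.core.plugins.storage.ceph_base import CephMon, CephOSD
from hotsos.core.plugins.storage.ceph_cluster import CephCluster, CephCrushMap

# after: ceph package split into common/daemon/cluster
from hotsos.core.plugins.storage.ceph.daemon import CephMon, CephOSD
from hotsos.core.plugins.storage.ceph.cluster import CephCluster, CephCrushMap
from hotsos.core.plugins.storage.ceph.common import CephChecks, CephEventCallbackBase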
8 changes: 8 additions & 0 deletions hotsos/core/plugins/storage/ceph/__init__.py
@@ -0,0 +1,8 @@
+from .common import CephChecks, CephConfig, CephDaemonAllOSDsFactory
+from .cluster import CephCluster, CephCrushMap
+
+__all__ = [CephChecks.__name__,
+           CephConfig.__name__,
+           CephDaemonAllOSDsFactory.__name__,
+           CephCluster.__name__,
+           CephCrushMap.__name__]
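Because the package __init__ above re-exports these names, callers can keep importing them from the package root, e.g.

from hotsos.core.plugins.storage.ceph import CephCluster, CephCrushMap

which is why the scenario property paths in the YAML diffs below reference hotsos.core.plugins.storage.ceph.CephCluster and hotsos.core.plugins.storage.ceph.CephCrushMap rather than the old ceph_cluster module.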
@@ -10,7 +10,7 @@
     SequenceSearchDef,
     SearchDef
 )
-from hotsos.core.plugins.storage.ceph_base import (
+from hotsos.core.plugins.storage.ceph.daemon import (
     CephMon,
     CephOSD,
 )
@@ -22,10 +22,8 @@
 from hotsos.core.plugins.kernel.net import Lsof
 from hotsos.core.plugins.storage import StorageBase
 from hotsos.core.plugins.storage.bcache import BcacheBase
-from hotsos.core.plugins.storage.ceph_base import (
-    CephOSD,
-)
-from hotsos.core.plugins.storage.ceph_cluster import CephCluster
+from hotsos.core.plugins.storage.ceph.daemon import CephOSD
+from hotsos.core.plugins.storage.ceph.cluster import CephCluster
 from hotsos.core.search import (
     FileSearcher,
     SequenceSearchDef,
File renamed without changes.
@@ -1,5 +1,5 @@
 vars:
-  autoscaler_enabled_pools: '@hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.autoscaler_enabled_pools'
+  autoscaler_enabled_pools: '@hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_enabled_pools'
   msg_main: >-
     This Ceph cluster is vulnerable to a bug in which OSDs can consume
     considerable amounts of memory and eventually be OOM killed due to
@@ -1,11 +1,11 @@
 checks:
   crushmap_has_mixed_type_buckets:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.crushmap_mixed_buckets
+      path: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_mixed_buckets
       ops: [[length_hint], [gt, 0]]
   crushmap_has_unequal_buckets:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.crushmap_equal_buckets
+      path: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_equal_buckets
       ops: [[length_hint], [gt, 0]]
 conclusions:
   crushmap-mixed-buckets:
@@ -16,7 +16,7 @@ conclusions:
         Mixed crush bucket types identified in buckets '{buckets}'. This can
         cause data distribution to become skewed - please check crush map.
       format-dict:
-        buckets: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.crushmap_mixed_buckets_str
+        buckets: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_mixed_buckets_str
   crushmap-unbalanced-buckets:
     decision: crushmap_has_unequal_buckets
     raises:
@@ -28,4 +28,4 @@ conclusions:
         Transient issues such as "out" OSDs, or cluster expansion/maintenance can trigger this warning.
         Affected CRUSH tree(s) and bucket types are {affected}.
       format-dict:
-        affected: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.crushmap_equal_buckets_pretty
+        affected: hotsos.core.plugins.storage.ceph.CephCrushMap.crushmap_equal_buckets_pretty
14 changes: 7 additions & 7 deletions hotsos/defs/scenarios/storage/ceph/ceph-mon/pg_imbalance.yaml
@@ -1,18 +1,18 @@
 checks:
   cluster_has_osds_with_pgs_above_max:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCluster.osds_pgs_above_max
+      path: hotsos.core.plugins.storage.ceph.CephCluster.osds_pgs_above_max
       ops: [[length_hint], [gt, 0]]
   cluster_has_osds_with_suboptimal_pgs:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCluster.osds_pgs_suboptimal
+      path: hotsos.core.plugins.storage.ceph.CephCluster.osds_pgs_suboptimal
       ops: [[length_hint], [gt, 0]]
   cluster_has_non_empty_pools:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCluster.cluster_has_non_empty_pools
+      path: hotsos.core.plugins.storage.ceph.CephCluster.cluster_has_non_empty_pools
   autoscaler_disabled_for_any_pool:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.autoscaler_disabled_pools
+      path: hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_disabled_pools
       ops: [[length_hint]]
 conclusions:
   cluster-osds-with-pgs-above-max:
@@ -24,7 +24,7 @@ conclusions:
         limit at which point they will stop creating pgs and fail - please
         investigate.
       format-dict:
-        limit: hotsos.core.plugins.storage.ceph_cluster.CephCluster.OSD_PG_MAX_LIMIT
+        limit: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_MAX_LIMIT
   cluster-osds-with-suboptimal-pgs:
     decision:
       - cluster_has_osds_with_suboptimal_pgs
@@ -37,5 +37,5 @@ conclusions:
         of {min}-{max} pgs. This could indicate poor data distribution across the
         cluster and result in performance degradation.
       format-dict:
-        min: hotsos.core.plugins.storage.ceph_cluster.CephCluster.OSD_PG_OPTIMAL_NUM_MIN
-        max: hotsos.core.plugins.storage.ceph_cluster.CephCluster.OSD_PG_OPTIMAL_NUM_MAX
+        min: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_OPTIMAL_NUM_MIN
+        max: hotsos.core.plugins.storage.ceph.CephCluster.OSD_PG_OPTIMAL_NUM_MAX
@@ -13,7 +13,7 @@ checks:
       ops: [[contains, civetweb]]
   is_rgw_using_civetweb:
     property:
-      path: hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.is_rgw_using_civetweb
+      path: hotsos.core.plugins.storage.ceph.CephCrushMap.is_rgw_using_civetweb
 conclusions:
   rgw_outdated_frontend:
     decision:
@@ -8,7 +8,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.ceph_report:
+    hotsos.core.plugins.storage.ceph.CephCrushMap.ceph_report:
       kwargs:
         new:
           osdmap:
@@ -8,7 +8,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.ceph_report:
+    hotsos.core.plugins.storage.ceph.CephCrushMap.ceph_report:
      kwargs:
        new:
          osdmap:
@@ -87,10 +87,10 @@ data-root:
     - sos_commands/ceph_mon/ceph_report
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.autoscaler_disabled_pools:
+    hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_disabled_pools:
       kwargs:
         new: true
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.cluster_has_non_empty_pools:
+    hotsos.core.plugins.storage.ceph.CephCluster.cluster_has_non_empty_pools:
       kwargs:
         new: true
 raised-issues:
@@ -963,10 +963,10 @@ data-root:
     - sos_commands/ceph_mon/ceph_report
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.autoscaler_disabled_pools:
+    hotsos.core.plugins.storage.ceph.CephCrushMap.autoscaler_disabled_pools:
      kwargs:
        new: true
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.cluster_has_non_empty_pools:
+    hotsos.core.plugins.storage.ceph.CephCluster.cluster_has_non_empty_pools:
      kwargs:
        new: true
 raised-issues:
@@ -11,7 +11,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.require_osd_release:
+    hotsos.core.plugins.storage.ceph.CephCluster.require_osd_release:
       kwargs:
         new: octopus
 raised-issues:
@@ -10,7 +10,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCrushMap.ceph_report:
+    hotsos.core.plugins.storage.ceph.CephCrushMap.ceph_report:
       kwargs:
         new:
           osdmap:
@@ -19,7 +19,7 @@ mock:
             - pg_autoscale_mode: 'off'
             - pool: 2
             - pg_autoscale_mode: 'on'
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.cluster_has_non_empty_pools:
+    hotsos.core.plugins.storage.ceph.CephCluster.cluster_has_non_empty_pools:
       kwargs:
         new: true
 raised-issues:
@@ -20,7 +20,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.health_status:
+    hotsos.core.plugins.storage.ceph.CephCluster.health_status:
       kwargs:
         new: HEALTH_WARN
 raised-issues:
@@ -23,7 +23,7 @@ data-root:
     - sos_commands/systemd/systemctl_list-unit-files
 mock:
   patch:
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.require_osd_release:
+    hotsos.core.plugins.storage.ceph.CephCluster.require_osd_release:
       kwargs:
         new: octopus
 raised-issues:
@@ -4,7 +4,7 @@ mock:
     hotsos.core.plugins.sosreport.SOSReportChecks.timed_out_plugins:
       kwargs:
         new: ['ceph_mon']
-    hotsos.core.plugins.storage.ceph_cluster.CephCluster.osd_df_tree:
+    hotsos.core.plugins.storage.ceph.CephCluster.osd_df_tree:
       kwargs:
         new: null
     hotsos.core.plugins.sosreport.SOSReportChecks.plugin_runnable:
2 changes: 1 addition & 1 deletion hotsos/plugin_extensions/storage/ceph_event_checks.py
@@ -1,7 +1,7 @@
 import re
 
 from hotsos.core.issues import IssuesManager, CephOSDError
-from hotsos.core.plugins.storage.ceph import (
+from hotsos.core.plugins.storage.ceph.common import (
     CephChecks,
     CephEventCallbackBase,
 )
2 changes: 1 addition & 1 deletion hotsos/plugin_extensions/storage/ceph_summary.py
@@ -1,4 +1,4 @@
-from hotsos.core.plugins.storage.ceph import CephChecks
+from hotsos.core.plugins.storage.ceph.common import CephChecks
 from hotsos.core.utils import sorted_dict
 from hotsos.core.plugintools import summary_entry
 
6 changes: 3 additions & 3 deletions tests/unit/storage/test_ceph_common.py
@@ -30,13 +30,13 @@ def setUp(self):
 class TestCephPluginDeps(CephCommonTestsBase):
     """ Unit tests for ceph plugin deps. """
     def test_ceph_dep_dpkg(self):
-        self.assertTrue(ceph.CephChecks().plugin_runnable)
+        self.assertTrue(ceph.common.CephChecks().plugin_runnable)
 
     @utils.create_data_root({'sos_commands/snap/snap_list_--all':
                              SNAP_LIST_MICROCEPH})
     def test_ceph_dep_snap(self):
-        self.assertTrue(ceph.CephChecks().plugin_runnable)
-        self.assertEqual(ceph.CephChecks().release_name, 'reef')
+        self.assertTrue(ceph.common.CephChecks().plugin_runnable)
+        self.assertEqual(ceph.common.CephChecks().release_name, 'reef')
 
 
 @utils.load_templated_tests('scenarios/storage/ceph/common')
47 changes: 22 additions & 25 deletions tests/unit/storage/test_ceph_mon.py
@@ -3,10 +3,7 @@
 from unittest import mock
 
 from hotsos.core.config import HotSOSConfig
-from hotsos.core.plugins.storage import (
-    ceph_base,
-    ceph_cluster,
-)
+from hotsos.core.plugins.storage import ceph
 from hotsos.core.ycheck.common import GlobalSearcher
 from hotsos.plugin_extensions.storage import ceph_summary, ceph_event_checks
 
@@ -181,49 +178,49 @@ def setup_fake_cli_osds_imbalanced_pgs(mock_cli_helper):
 class TestCoreCephCluster(CephMonTestsBase):
     """ Unit tests for ceph cluster code. """
     def test_cluster_mons(self):
-        cluster_mons = ceph_cluster.CephCluster().mons
-        self.assertEqual([ceph_base.CephMon],
+        cluster_mons = ceph.CephCluster().mons
+        self.assertEqual([ceph.daemon.CephMon],
                          list(set(type(obj) for obj in cluster_mons)))
 
     def test_cluster_osds(self):
-        cluster_osds = ceph_cluster.CephCluster().osds
-        self.assertEqual([ceph_base.CephOSD],
+        cluster_osds = ceph.CephCluster().osds
+        self.assertEqual([ceph.daemon.CephOSD],
                          list(set(type(obj) for obj in cluster_osds)))
 
     def test_health_status(self):
-        health = ceph_cluster.CephCluster().health_status
+        health = ceph.CephCluster().health_status
         self.assertEqual(health, 'HEALTH_WARN')
 
     def test_osd_versions(self):
-        versions = ceph_cluster.CephCluster().daemon_versions('osd')
+        versions = ceph.CephCluster().daemon_versions('osd')
         self.assertEqual(versions, {'15.2.14': 3})
 
     def test_mon_versions(self):
-        versions = ceph_cluster.CephCluster().daemon_versions('mon')
+        versions = ceph.CephCluster().daemon_versions('mon')
         self.assertEqual(versions, {'15.2.14': 3})
 
     def test_mds_versions(self):
-        versions = ceph_cluster.CephCluster().daemon_versions('mds')
+        versions = ceph.CephCluster().daemon_versions('mds')
         self.assertEqual(versions, {})
 
     def test_rgw_versions(self):
-        versions = ceph_cluster.CephCluster().daemon_versions('rgw')
+        versions = ceph.CephCluster().daemon_versions('rgw')
         self.assertEqual(versions, {})
 
     def test_osd_release_name(self):
-        release_names = ceph_cluster.CephCluster().daemon_release_names('osd')
+        release_names = ceph.CephCluster().daemon_release_names('osd')
         self.assertEqual(release_names, {'octopus': 3})
 
     def test_mon_release_name(self):
-        release_names = ceph_cluster.CephCluster().daemon_release_names('mon')
+        release_names = ceph.CephCluster().daemon_release_names('mon')
         self.assertEqual(release_names, {'octopus': 3})
 
     def test_cluster_osd_ids(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         self.assertEqual([osd.id for osd in cluster.osds], [0, 1, 2])
 
     def test_crush_rules(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         expected = {'replicated_rule': {'id': 0, 'type': 'replicated',
                     'pools': ['device_health_metrics (1)', 'glance (2)',
                               'cinder-ceph (3)', 'nova (4)']}}
@@ -233,7 +230,7 @@ def test_ceph_daemon_versions_unique(self):
         result = {'mgr': ['15.2.14'],
                   'mon': ['15.2.14'],
                   'osd': ['15.2.14']}
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         self.assertEqual(cluster.ceph_daemon_versions_unique(), result)
         self.assertTrue(cluster.ceph_versions_aligned)
         self.assertTrue(cluster.mon_versions_aligned_with_cluster)
@@ -246,30 +243,30 @@ def test_ceph_daemon_versions_unique_not(self):
                           '15.2.11'],
                   'osd': ['15.2.11',
                           '15.2.13']}
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         self.assertEqual(cluster.ceph_daemon_versions_unique(), result)
         self.assertFalse(cluster.ceph_versions_aligned)
         self.assertFalse(cluster.mon_versions_aligned_with_cluster)
 
     def test_crushmap_equal_buckets(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         buckets = cluster.crush_map.crushmap_equal_buckets
         self.assertEqual(buckets, [])
 
     @utils.create_data_root({'sos_commands/ceph_mon/ceph_osd_crush_dump':
                              CEPH_OSD_CRUSH_DUMP})
     def test_crushmap_mixed_buckets(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         buckets = cluster.crush_map.crushmap_mixed_buckets
         self.assertEqual(buckets, ['default'])
 
     def test_crushmap_no_mixed_buckets(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         buckets = cluster.crush_map.crushmap_mixed_buckets
         self.assertEqual(buckets, [])
 
     def test_mgr_modules(self):
-        cluster = ceph_cluster.CephCluster()
+        cluster = ceph.CephCluster()
         expected = ['balancer',
                     'crash',
                     'devicehealth',
@@ -346,7 +343,7 @@ def test_cluster_info(self):
         self.assertEqual(actual['versions'], expected['versions'])
 
     @mock.patch(
-        'hotsos.core.plugins.storage.ceph_cluster.CephCluster.pool_id_to_name',
+        'hotsos.core.plugins.storage.ceph.CephCluster.pool_id_to_name',
         lambda *args: 'foo')
     @utils.create_data_root({'sos_commands/ceph_mon/json_output/'
                              'ceph_pg_dump_--format_json-pretty':
@@ -360,7 +357,7 @@ def test_cluster_info_large_omap_pgs(self):
         actual = self.part_output_to_actual(inst.output)
         self.assertEqual(actual['large-omap-pgs'], expected)
 
-    @mock.patch.object(ceph_cluster, 'CLIHelper')
+    @mock.patch.object(ceph.cluster, 'CLIHelper')
     def test_ceph_pg_imbalance(self, mock_helper):
         result = self.setup_fake_cli_osds_imbalanced_pgs(mock_helper)
         inst = ceph_summary.CephSummary()