Skip to content

Commit

Permalink
Replace Magic Strings with Literals
Browse files Browse the repository at this point in the history
  • Loading branch information
mateoflorido committed Dec 4, 2024
1 parent 5d6af4b commit 7b8af28
Show file tree
Hide file tree
Showing 5 changed files with 78 additions and 42 deletions.
67 changes: 37 additions & 30 deletions charms/worker/k8s/src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,22 @@
from events import update_status
from inspector import ClusterInspector
from kube_control import configure as configure_kube_control
from literals import DEPENDENCIES
# Constants shared across the charm modules (relation names, paths, ports).
# Kept alphabetized so merge conflicts stay small and missing names are easy
# to spot; CLUSTER_WORKER_RELATION was previously appended out of order.
from literals import (
    CLUSTER_RELATION,
    CLUSTER_WORKER_RELATION,
    CONTAINERD_RELATION,
    COS_RELATION,
    COS_TOKENS_RELATION,
    COS_TOKENS_WORKER_RELATION,
    DEPENDENCIES,
    ETC_KUBERNETES,
    ETCD_RELATION,
    K8SD_PORT,
    K8SD_SNAP_SOCKET,
    KUBECONFIG,
    KUBECTL_PATH,
    SUPPORTED_DATASTORES,
)
from ops.interface_kube_control import KubeControlProvides
from snap import management as snap_management
from snap import version as snap_version
Expand All @@ -77,14 +92,6 @@
# Log messages can be retrieved using juju debug-log
log = logging.getLogger(__name__)

VALID_LOG_LEVELS = ["info", "debug", "warning", "error", "critical"]
K8SD_SNAP_SOCKET = "/var/snap/k8s/common/var/lib/k8sd/state/control.socket"
KUBECONFIG = Path.home() / ".kube/config"
ETC_KUBERNETES = Path("/etc/kubernetes")
KUBECTL_PATH = Path("/snap/k8s/current/bin/kubectl")
K8SD_PORT = 6400
SUPPORTED_DATASTORES = ["dqlite", "etcd"]


def _get_public_address() -> str:
"""Get public address from juju.
Expand Down Expand Up @@ -208,7 +215,7 @@ def _apply_cos_requirements(self):
Integration by applying the manifests for COS Cluster Roles and
kube-state-metrics (K-S-M).
"""
if not self.model.get_relation("cos-agent"):
if not self.model.get_relation(COS_RELATION):
return

log.info("Apply COS Integrations")
Expand Down Expand Up @@ -242,7 +249,7 @@ def get_worker_versions(self) -> Dict[str, List[ops.Unit]]:
Returns:
Dict[str, List[ops.Unit]]: A dictionary of versions and the units that have them.
"""
if not (relation := self.model.get_relation("k8s-cluster")):
if not (relation := self.model.get_relation(CLUSTER_WORKER_RELATION)):
return {}

versions = defaultdict(list)
Expand Down Expand Up @@ -293,7 +300,7 @@ def get_cluster_name(self) -> str:
if self._stored.cluster_name == "":
if self.lead_control_plane and self.api_manager.is_cluster_bootstrapped():
self._stored.cluster_name = self._generate_unique_cluster_name()
elif not (relation := self.model.get_relation("cluster")):
elif not (relation := self.model.get_relation(CLUSTER_RELATION)):
pass
elif any(
[
Expand Down Expand Up @@ -402,7 +409,7 @@ def _bootstrap_k8s_snap(self):
def _config_containerd_registries(self):
"""Apply containerd custom registries."""
registries, config = [], ""
containerd_relation = self.model.get_relation("containerd")
containerd_relation = self.model.get_relation(CONTAINERD_RELATION)
if self.is_control_plane:
config = str(self.config["containerd-custom-registries"])
registries = containerd.parse_registries(config)
Expand All @@ -415,12 +422,12 @@ def _config_containerd_registries(self):

def _configure_cos_integration(self):
"""Retrieve the join token from secret databag and join the cluster."""
if not self.model.get_relation("cos-agent"):
if not self.model.get_relation(COS_RELATION):
return

status.add(ops.MaintenanceStatus("Updating COS integrations"))
log.info("Updating COS integration")
if relation := self.model.get_relation("cos-tokens"):
if relation := self.model.get_relation(COS_TOKENS_RELATION):
self.collector.request(relation)

def _get_valid_annotations(self) -> Optional[dict]:
Expand Down Expand Up @@ -540,7 +547,7 @@ def _configure_datastore(self, config: Union[BootstrapConfig, UpdateClusterConfi

if datastore == "etcd":
log.info("Using etcd as external datastore")
etcd_relation = self.model.get_relation("etcd")
etcd_relation = self.model.get_relation(ETCD_RELATION)

if not etcd_relation:
raise ReconcilerError("Missing etcd relation")
Expand Down Expand Up @@ -585,15 +592,15 @@ def _revoke_cluster_tokens(self, event: ops.EventBase):
elif unit := _cluster_departing_unit(event):
to_remove = unit

if peer := self.model.get_relation("cluster"):
if peer := self.model.get_relation(CLUSTER_RELATION):
self.distributor.revoke_tokens(
relation=peer,
token_strategy=TokenStrategy.CLUSTER,
token_type=ClusterTokenType.CONTROL_PLANE,
to_remove=to_remove,
)

if workers := self.model.get_relation("k8s-cluster"):
if workers := self.model.get_relation(CLUSTER_WORKER_RELATION):
self.distributor.revoke_tokens(
relation=workers,
token_strategy=TokenStrategy.CLUSTER,
Expand All @@ -604,14 +611,14 @@ def _revoke_cluster_tokens(self, event: ops.EventBase):
def _create_cluster_tokens(self):
"""Create tokens for the units in the cluster and k8s-cluster relations."""
log.info("Prepare clustering")
if peer := self.model.get_relation("cluster"):
if peer := self.model.get_relation(CLUSTER_RELATION):
self.distributor.allocate_tokens(
relation=peer,
token_strategy=TokenStrategy.CLUSTER,
token_type=ClusterTokenType.CONTROL_PLANE,
)

if workers := self.model.get_relation("k8s-cluster"):
if workers := self.model.get_relation(CLUSTER_WORKER_RELATION):
self.distributor.allocate_tokens(
relation=workers,
token_strategy=TokenStrategy.CLUSTER,
Expand All @@ -624,18 +631,18 @@ def _create_cos_tokens(self):
This method creates COS tokens and distributes them to peers and workers
if relations exist.
"""
if not self.model.get_relation("cos-agent"):
if not self.model.get_relation(COS_RELATION):
return

log.info("Prepare cos tokens")
if rel := self.model.get_relation("cos-tokens"):
if rel := self.model.get_relation(COS_TOKENS_RELATION):
self.distributor.allocate_tokens(
relation=rel,
token_strategy=TokenStrategy.COS,
token_type=ClusterTokenType.CONTROL_PLANE,
)

if rel := self.model.get_relation("cos-worker-tokens"):
if rel := self.model.get_relation(COS_TOKENS_WORKER_RELATION):
self.distributor.allocate_tokens(
relation=rel,
token_strategy=TokenStrategy.COS,
Expand Down Expand Up @@ -672,7 +679,7 @@ def _get_scrape_jobs(self):
Returns an empty list if the token cannot be retrieved or if the
"cos-tokens" relation does not exist.
"""
relation = self.model.get_relation("cos-tokens")
relation = self.model.get_relation(COS_TOKENS_RELATION)
if not relation:
log.warning("No cos-tokens available")
return []
Expand All @@ -693,7 +700,7 @@ def _update_kubernetes_version(self):
Raises:
ReconcilerError: If the cluster integration is missing.
"""
relation = self.model.get_relation("cluster")
relation = self.model.get_relation(CLUSTER_RELATION)
if not relation:
status.add(ops.BlockedStatus("Missing cluster integration"))
raise ReconcilerError("Missing cluster integration")
Expand All @@ -715,8 +722,8 @@ def _announce_kubernetes_version(self):
if not local_version:
raise ReconcilerError("k8s-snap is not installed")

peer = self.model.get_relation("cluster")
worker = self.model.get_relation("k8s-cluster")
peer = self.model.get_relation(CLUSTER_RELATION)
worker = self.model.get_relation(CLUSTER_WORKER_RELATION)

for relation in (peer, worker):
if not relation:
Expand Down Expand Up @@ -765,7 +772,7 @@ def _join_cluster(self, event: ops.EventBase):
Args:
event (ops.EventBase): event triggering the join
"""
if not (relation := self.model.get_relation("cluster")):
if not (relation := self.model.get_relation(CLUSTER_RELATION)):
status.add(ops.BlockedStatus("Missing cluster integration"))
raise ReconcilerError("Missing cluster integration")

Expand Down Expand Up @@ -819,7 +826,7 @@ def _death_handler(self, event: ops.EventBase):
self.update_status.run()
self._last_gasp()

relation = self.model.get_relation("cluster")
relation = self.model.get_relation(CLUSTER_RELATION)
local_cluster = self.get_cluster_name()
remote_cluster = self.collector.cluster_name(relation, False) if relation else ""
if local_cluster and local_cluster != remote_cluster:
Expand Down Expand Up @@ -887,7 +894,7 @@ def _evaluate_removal(self, event: ops.EventBase) -> bool:
elif (
self.is_worker
and self.get_cluster_name()
and (relation := self.model.get_relation("cluster"))
and (relation := self.model.get_relation(CLUSTER_RELATION))
and not relation.units
):
# If a worker unit has been clustered,
Expand Down
27 changes: 27 additions & 0 deletions charms/worker/k8s/src/literals.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,32 @@

"""Literals for the charm."""

from pathlib import Path

# Snap
# Name of the snap that ships the k8s components.
SNAP_NAME = "k8s"

# Logging
# Log levels accepted by the charm's `log-level` style configuration.
VALID_LOG_LEVELS = ["info", "debug", "warning", "error", "critical"]

# Charm
# Filesystem locations and ports used when driving k8sd and kubectl.
ETC_KUBERNETES = Path("/etc/kubernetes")
KUBECONFIG = Path.home() / ".kube/config"
KUBECTL_PATH = Path("/snap/k8s/current/bin/kubectl")
K8SD_SNAP_SOCKET = "/var/snap/k8s/common/var/lib/k8sd/state/control.socket"
K8SD_PORT = 6400
# Datastores the charm knows how to configure ("dqlite" is the default).
SUPPORTED_DATASTORES = ["dqlite", "etcd"]

# Relations
# Juju relation endpoint names, alphabetized by constant name
# (COS_RELATION was previously listed after the COS_TOKENS_* names).
CLUSTER_RELATION = "cluster"
CLUSTER_WORKER_RELATION = "k8s-cluster"
CONTAINERD_RELATION = "containerd"
COS_RELATION = "cos-agent"
COS_TOKENS_RELATION = "cos-tokens"
COS_TOKENS_WORKER_RELATION = "cos-worker-tokens"
ETCD_RELATION = "etcd"

# Kubernetes services
K8S_COMMON_SERVICES = [
"kubelet",
"kube-proxy",
Expand All @@ -18,12 +42,15 @@
K8S_DQLITE_SERVICE,
"kube-controller-manager",
"kube-scheduler",
*K8S_COMMON_SERVICES,
]

K8S_WORKER_SERVICES = [
"k8s-apiserver-proxy",
*K8S_COMMON_SERVICES,
]

# Upgrade
DEPENDENCIES = {
# NOTE: Update the dependencies for the k8s-charm before releasing.
"k8s_charm": {
Expand Down
10 changes: 5 additions & 5 deletions charms/worker/k8s/src/upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from charms.operator_libs_linux.v2.snap import SnapError
from inspector import ClusterInspector
from literals import (
K8S_COMMON_SERVICES,
CLUSTER_RELATION,
K8S_CONTROL_PLANE_SERVICES,
K8S_DQLITE_SERVICE,
K8S_WORKER_SERVICES,
Expand Down Expand Up @@ -199,12 +199,12 @@ def _upgrade(self, event: Union[ops.EventBase, ops.HookEvent]) -> None:
self.charm.grant_upgrade()

services = (
K8S_CONTROL_PLANE_SERVICES + K8S_COMMON_SERVICES
K8S_CONTROL_PLANE_SERVICES
if self.charm.is_control_plane
else K8S_COMMON_SERVICES + K8S_WORKER_SERVICES
else K8S_WORKER_SERVICES
)

if K8S_DQLITE_SERVICE in services and self.charm.datastore == "dqlite":
if K8S_DQLITE_SERVICE in services and self.charm.datastore != "dqlite":
services.remove(K8S_DQLITE_SERVICE)

try:
Expand All @@ -227,7 +227,7 @@ def build_upgrade_stack(self) -> List[int]:
Returns:
A list of unit numbers to upgrade in order.
"""
relation = self.charm.model.get_relation("cluster")
relation = self.charm.model.get_relation(CLUSTER_RELATION)
if not relation:
return [int(self.charm.unit.name.split("/")[-1])]

Expand Down
11 changes: 6 additions & 5 deletions charms/worker/k8s/tests/unit/test_token_distributor.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import pytest
import token_distributor
from charm import K8sCharm
from literals import CLUSTER_RELATION


@pytest.fixture(params=["worker", "control-plane"])
Expand All @@ -37,7 +38,7 @@ def test_request(harness):
harness.disable_hooks()
collector = token_distributor.TokenCollector(harness.charm, "my-node")
relation_id = harness.add_relation("cluster", "remote")
collector.request(harness.charm.model.get_relation("cluster"))
collector.request(harness.charm.model.get_relation(CLUSTER_RELATION))
data = harness.get_relation_data(relation_id, harness.charm.unit.name)
assert data["node-name"] == "my-node"

Expand All @@ -47,8 +48,8 @@ def test_cluster_name_not_joined(harness):
harness.disable_hooks()
collector = token_distributor.TokenCollector(harness.charm, "my-node")
relation_id = harness.add_relation("cluster", "remote")
remote = collector.cluster_name(harness.charm.model.get_relation("cluster"), False)
local = collector.cluster_name(harness.charm.model.get_relation("cluster"), True)
remote = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), False)
local = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), True)
assert remote == local == ""
data = harness.get_relation_data(relation_id, harness.charm.unit.name)
assert not data.get("joined")
Expand All @@ -60,13 +61,13 @@ def test_cluster_name_joined(harness):
collector = token_distributor.TokenCollector(harness.charm, "my-node")
relation_id = harness.add_relation("cluster", "k8s", unit_data={"cluster-name": "my-cluster"})
# Fetching the remote doesn't update joined field
remote = collector.cluster_name(harness.charm.model.get_relation("cluster"), False)
remote = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), False)
assert remote == "my-cluster"
data = harness.get_relation_data(relation_id, harness.charm.unit.name)
assert not data.get("joined")

# Fetching the local does update joined field
local = collector.cluster_name(harness.charm.model.get_relation("cluster"), True)
local = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), True)
assert remote == local == "my-cluster"
data = harness.get_relation_data(relation_id, harness.charm.unit.name)
assert data["joined"] == "my-cluster"
5 changes: 3 additions & 2 deletions charms/worker/k8s/tests/unit/test_upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from inspector import ClusterInspector
from lightkube.models.core_v1 import Node
from lightkube.models.meta_v1 import ObjectMeta
from literals import CLUSTER_RELATION
from upgrade import K8sDependenciesModel, K8sUpgrade


Expand Down Expand Up @@ -103,7 +104,7 @@ def test_build_upgrade_stack_no_relation(self):
result = self.upgrade.build_upgrade_stack()

self.assertEqual(result, [0])
self.charm.model.get_relation.assert_called_once_with("cluster")
self.charm.model.get_relation.assert_called_once_with(CLUSTER_RELATION)

def test_build_upgrade_stack_with_relation(self):
"""Test build_upgrade_stack with cluster relation."""
Expand All @@ -119,7 +120,7 @@ def test_build_upgrade_stack_with_relation(self):
result = self.upgrade.build_upgrade_stack()

self.assertEqual(sorted(result), [0, 1, 2])
self.charm.model.get_relation.assert_called_once_with("cluster")
self.charm.model.get_relation.assert_called_once_with(CLUSTER_RELATION)

def test_verify_worker_versions_compatible(self):
"""Test _verify_worker_versions returns True when worker versions is compatible."""
Expand Down

0 comments on commit 7b8af28

Please sign in to comment.