Improve the juju unit status #72

Merged: 7 commits, Apr 20, 2024
Changes from 4 commits
39 changes: 18 additions & 21 deletions charms/worker/k8s/src/charm.py
@@ -128,7 +128,7 @@ def _apply_cos_requirements(self):
return

log.info("Apply COS Integrations")
- status.add(ops.MaintenanceStatus("Configuring COS Integration"))
+ status.add(ops.MaintenanceStatus("Ensuring COS Integration"))
subprocess.check_call(shlex.split("k8s kubectl apply -f templates/cos_roles.yaml"))
subprocess.check_call(shlex.split("k8s kubectl apply -f templates/ksm.yaml"))
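For context, `shlex.split` turns each command string into an argv list, so `check_call` runs without a shell; a non-zero exit raises `CalledProcessError`, which the `on_error` decorators in this file translate into a unit status. A quick standard-library illustration (nothing here is charm-specific):

```python
import shlex

argv = shlex.split("k8s kubectl apply -f templates/cos_roles.yaml")
assert argv == ["k8s", "kubectl", "apply", "-f", "templates/cos_roles.yaml"]
# subprocess.check_call(argv) then runs shell-free and raises
# CalledProcessError on a non-zero exit code.
```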

@@ -184,18 +184,18 @@ def get_cloud_name(self) -> str:
@on_error(ops.BlockedStatus("Failed to install k8s snap."), SnapError)
def _install_k8s_snap(self):
"""Install the k8s snap package."""
status.add(ops.MaintenanceStatus("Installing k8s snap"))
status.add(ops.MaintenanceStatus("Ensuring snap installation"))
log.info("Ensuring k8s snap version")
snap_ensure("k8s", SnapState.Latest.value, self.config["channel"])

@on_error(WaitingStatus("Failed to apply snap requirements"), subprocess.CalledProcessError)
@on_error(WaitingStatus("Waiting to apply snap requirements"), subprocess.CalledProcessError)
def _apply_snap_requirements(self):
"""Apply necessary snap requirements for the k8s snap.

This method executes necessary scripts to ensure that the snap
meets the network and interface requirements.
"""
status.add(ops.MaintenanceStatus("Applying K8s requirements"))
status.add(ops.MaintenanceStatus("Ensuring snap requirements"))
log.info("Applying K8s requirements")
init_sh = "/snap/k8s/current/k8s/hack/init.sh"
subprocess.check_call(shlex.split(init_sh))
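For readers outside the repo: the `@on_error(...)` decorators above come from this charm's status helper library. A minimal sketch of the contract they appear to implement — names and behavior are assumptions, not the repo's actual code:

```python
import functools
import ops

def on_error(status: ops.StatusBase, *exceptions: type):
    """Sketch: if the wrapped method raises one of ``exceptions``,
    surface ``status`` on the unit instead of crashing the hook."""
    exceptions = exceptions or (Exception,)  # bare @on_error(...): catch anything

    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except exceptions:
                self.unit.status = status  # e.g. the WaitingStatus above
                raise  # let the reconciler abort this pass
        return wrapper

    return decorator
```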
@@ -204,7 +204,7 @@ def _apply_snap_requirements(self):
def _check_k8sd_ready(self):
"""Check if k8sd is ready to accept requests."""
log.info("Check if k8ds is ready")
status.add(ops.MaintenanceStatus("Check k8sd ready"))
status.add(ops.MaintenanceStatus("Ensuring snap readiness"))
self.api_manager.check_k8sd_ready()

@on_error(
@@ -234,18 +234,13 @@ def _bootstrap_k8s_snap(self):
# TODO: Make port (and address) configurable.
self.api_manager.bootstrap_k8s_snap(payload)

- @status.on_error(
-     ops.WaitingStatus("Configuring COS Integration"),
-     subprocess.CalledProcessError,
-     AssertionError,
- )
def _configure_cos_integration(self):
"""Retrieve the join token from secret databag and join the cluster."""
if not self.model.get_relation("cos-agent"):
return

status.add(ops.MaintenanceStatus("Configuring COS integration"))
log.info("Configuring COS integration")
status.add(ops.MaintenanceStatus("Updating COS integrations"))
log.info("Updating COS integration")
if relation := self.model.get_relation("cos-tokens"):
self.collector.request(relation)

@@ -346,14 +341,14 @@ def _create_cos_tokens(self):
self.distributor.allocate_tokens(relation=rel, token_strategy=TokenStrategy.COS)

@on_error(
WaitingStatus("Waiting for enable functionalities"),
WaitingStatus("Waiting to enable features"),
InvalidResponseError,
K8sdConnectionError,
)
def _enable_functionalities(self):
"""Enable necessary components for the Kubernetes cluster."""
status.add(ops.MaintenanceStatus("Enabling Functionalities"))
log.info("Enabling Functionalities")
status.add(ops.MaintenanceStatus("Updating K8s features"))
log.info("Enabling K8s features")
dns_config = DNSConfig(enabled=True)
network_config = NetworkConfig(enabled=True)
user_cluster_config = UserFacingClusterConfig(dns=dns_config, network=network_config)
@@ -495,10 +490,12 @@ def _update_status(self):
status.add(ops.WaitingStatus("Preparing to leave cluster"))
return
if self.is_worker:
- relation = self.model.get_relation("cluster")
- assert relation, "Missing cluster relation with k8s" # nosec
+ if not self.model.get_relation("cluster"):
+     status.add(ops.BlockedStatus("Missing cluster integration"))
Review comment (Member): Love it!
+     assert False, "Missing cluster integration" # nosec
else:
assert self.api_manager.is_cluster_ready(), "control-plane not yet ready" # nosec

if version := self._get_snap_version():
self.unit.set_workload_version(version)
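This hunk is the heart of the PR: a worker without the `cluster` relation now surfaces `BlockedStatus` before aborting the pass, and the `assert False` relies on the reconciler treating `AssertionError` as "stop here, keep the last status added". A hedged sketch of that contract from the caller's side — the real reconciler lives elsewhere in the repo:

```python
import logging

log = logging.getLogger(__name__)

def reconcile(charm) -> None:
    """Sketch: a reconciler honoring the BlockedStatus + ``assert False``
    pattern -- AssertionError ends the pass, the added status remains."""
    try:
        charm._update_status()
    except AssertionError as err:
        log.warning("reconcile halted: %s", err)
        # The unit keeps BlockedStatus("Missing cluster integration"),
        # which test_update_status below asserts for the worker flavor.
```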

@@ -526,7 +523,7 @@ def _last_gasp(self, event):
if not isinstance(event, ops.StopEvent):
return
busy_wait = 30
status.add(ops.MaintenanceStatus("Awaiting cluster removal"))
status.add(ops.MaintenanceStatus("Ensuring cluster removal"))
while busy_wait and self.api_manager.is_cluster_bootstrapped():
log.info("Waiting for this unit to uncluster")
sleep(1)
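The loop above bounds how long a stopping unit waits to leave the cluster (`busy_wait` is presumably decremented in lines the diff view collapses). For reference, a generic bounded-wait idiom of the same shape, independent of this charm:

```python
import time

def wait_until(predicate, timeout_s: int = 30) -> bool:
    """Poll ``predicate`` once per second until it is true or time runs out."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(1)
    return False
```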
@@ -535,7 +532,7 @@
@status.on_error(ops.BlockedStatus("Cannot apply node-labels"), LabelMaker.NodeLabelError)
def _apply_node_labels(self):
"""Apply labels to the node."""
status.add(ops.MaintenanceStatus("Apply Node Labels"))
status.add(ops.MaintenanceStatus("Ensuring Kubernetes Node Labels"))
node = self.get_node_name()
if self.labeler.active_labels() is not None:
self.labeler.apply_node_labels()
@@ -551,7 +548,7 @@ def _on_update_status(self, _event: ops.UpdateStatusEvent):
with status.context(self.unit):
self._update_status()
except status.ReconcilerError:
log.exception("Can't to update_status")
log.exception("Can't update_status")

@property
def _internal_kubeconfig(self) -> Path:
@@ -561,7 +558,7 @@
@on_error(ops.WaitingStatus(""))
def _copy_internal_kubeconfig(self):
"""Write internal kubeconfig to /root/.kube/config."""
status.add(ops.MaintenanceStatus("Generating KubeConfig"))
status.add(ops.MaintenanceStatus("Regenerating KubeConfig"))
KUBECONFIG.parent.mkdir(parents=True, exist_ok=True)
KUBECONFIG.write_bytes(self._internal_kubeconfig.read_bytes())

5 changes: 4 additions & 1 deletion charms/worker/k8s/tests/unit/test_base.py
@@ -86,7 +86,10 @@ def test_update_status(harness):
"""
harness.charm.reconciler.stored.reconciled = True # Pretended to be reconciled
harness.charm.on.update_status.emit()
- assert harness.model.unit.status == ops.WaitingStatus("Cluster not yet ready")
+ if harness.charm.is_worker:
+     assert harness.model.unit.status == ops.BlockedStatus("Missing cluster integration")
+ else:
+     assert harness.model.unit.status == ops.WaitingStatus("Cluster not yet ready")
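Which branch runs depends on the charm flavor the `harness` fixture builds. If the repo's fixture is not already parametrized over both flavors, one way to exercise both sides of the new assertion — the fixture shape, import path, and charm-class name are assumptions:

```python
import pytest
import ops.testing
from charm import K8sCharm  # assumed import path for the charm class

@pytest.fixture(params=["k8s", "k8s-worker"])
def harness(request):
    """Sketch: one Harness per charm flavor, assuming is_worker is
    derived from the charm name in metadata."""
    harness = ops.testing.Harness(K8sCharm, meta=f"name: {request.param}")
    harness.begin()
    yield harness
    harness.cleanup()
```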


def test_set_leader(harness):
2 changes: 1 addition & 1 deletion tests/integration/test_etcd.py
@@ -40,4 +40,4 @@ async def test_etcd_datastore(kubernetes_cluster: model.Model):
status = json.loads(result.results["stdout"])
assert status["ready"], "Cluster isn't ready"
assert status["datastore"]["type"] == "external", "Not bootstrapped against etcd"
assert status["datastore"]["external-url"] == f"https://{etcd.public_address}:{etcd_port}"
assert status["datastore"]["servers"] == [f"https://{etcd.public_address}:{etcd_port}"]
addyess marked this conversation as resolved.
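For context, the updated assertion expects the `k8s status` JSON to report a list of datastore servers rather than a single `external-url`. A payload shape that would satisfy the test — field names are taken from the assertions above; addresses and values are illustrative:

```python
example_status = {
    "ready": True,
    "datastore": {
        "type": "external",
        "servers": ["https://10.152.183.10:2379"],
    },
}
```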