diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py
index c346cc04..b9c47f74 100755
--- a/charms/worker/k8s/src/charm.py
+++ b/charms/worker/k8s/src/charm.py
@@ -128,7 +128,7 @@ def _apply_cos_requirements(self):
             return
 
         log.info("Apply COS Integrations")
-        status.add(ops.MaintenanceStatus("Configuring COS Integration"))
+        status.add(ops.MaintenanceStatus("Ensuring COS Integration"))
         subprocess.check_call(shlex.split("k8s kubectl apply -f templates/cos_roles.yaml"))
         subprocess.check_call(shlex.split("k8s kubectl apply -f templates/ksm.yaml"))
 
@@ -184,18 +184,18 @@ def get_cloud_name(self) -> str:
     @on_error(ops.BlockedStatus("Failed to install k8s snap."), SnapError)
     def _install_k8s_snap(self):
         """Install the k8s snap package."""
-        status.add(ops.MaintenanceStatus("Installing k8s snap"))
+        status.add(ops.MaintenanceStatus("Ensuring snap installation"))
         log.info("Ensuring k8s snap version")
         snap_ensure("k8s", SnapState.Latest.value, self.config["channel"])
 
-    @on_error(WaitingStatus("Failed to apply snap requirements"), subprocess.CalledProcessError)
+    @on_error(WaitingStatus("Waiting to apply snap requirements"), subprocess.CalledProcessError)
     def _apply_snap_requirements(self):
         """Apply necessary snap requirements for the k8s snap.
 
         This method executes necessary scripts to ensure that the snap
         meets the network and interface requirements.
         """
-        status.add(ops.MaintenanceStatus("Applying K8s requirements"))
+        status.add(ops.MaintenanceStatus("Ensuring snap requirements"))
         log.info("Applying K8s requirements")
         init_sh = "/snap/k8s/current/k8s/hack/init.sh"
         subprocess.check_call(shlex.split(init_sh))
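Most hunks in this file touch the charm's `status` helper, so the renamed messages are easier to review with its contract in mind: `status.add(...)` records a status inside an open `status.context(unit)` block, and `@on_error(some_status, *exceptions)` maps the listed exceptions to that status instead of crashing the hook. Below is a minimal sketch of that contract as this diff appears to assume it; it is not the helper library's actual implementation, and every internal detail (`_scopes`, the last-one-wins rule) is an illustrative guess.

```python
# Minimal sketch of the assumed status-helper contract -- NOT the real
# library. Semantics inferred from status.add / on_error / status.context
# usage visible in charm.py; all internals here are hypothetical.
import contextlib
import functools
from typing import List, Tuple

import ops

_scopes: List[Tuple[ops.Unit, List[ops.StatusBase]]] = []


@contextlib.contextmanager
def context(unit: ops.Unit):
    """Own unit.status for the duration of the block."""
    _scopes.append((unit, []))
    try:
        yield
    finally:
        owner, recorded = _scopes.pop()
        # Last status recorded via add() wins; ActiveStatus if none.
        owner.status = recorded[-1] if recorded else ops.ActiveStatus()


def add(new_status: ops.StatusBase) -> None:
    """Record a status in the innermost open context()."""
    if _scopes:
        _scopes[-1][1].append(new_status)


def on_error(error_status: ops.StatusBase, *exc_types: type):
    """Map the listed exceptions raised by a handler to error_status."""
    catch = exc_types or (Exception,)

    def decorator(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except catch:
                add(error_status)
                raise  # the reconciler decides whether to retry
        return wrapped

    return decorator
```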
@@ -204,7 +204,7 @@ def _apply_snap_requirements(self):
     def _check_k8sd_ready(self):
         """Check if k8sd is ready to accept requests."""
         log.info("Check if k8ds is ready")
-        status.add(ops.MaintenanceStatus("Check k8sd ready"))
+        status.add(ops.MaintenanceStatus("Ensuring snap readiness"))
         self.api_manager.check_k8sd_ready()
 
     @on_error(
@@ -234,18 +234,13 @@ def _bootstrap_k8s_snap(self):
 
         # TODO: Make port (and address) configurable.
         self.api_manager.bootstrap_k8s_snap(payload)
 
-    @status.on_error(
-        ops.WaitingStatus("Configuring COS Integration"),
-        subprocess.CalledProcessError,
-        AssertionError,
-    )
     def _configure_cos_integration(self):
         """Retrieve the join token from secret databag and join the cluster."""
         if not self.model.get_relation("cos-agent"):
             return
-        status.add(ops.MaintenanceStatus("Configuring COS integration"))
-        log.info("Configuring COS integration")
+        status.add(ops.MaintenanceStatus("Updating COS integrations"))
+        log.info("Updating COS integration")
         if relation := self.model.get_relation("cos-tokens"):
             self.collector.request(relation)
 
@@ -346,14 +341,14 @@ def _create_cos_tokens(self):
         self.distributor.allocate_tokens(relation=rel, token_strategy=TokenStrategy.COS)
 
     @on_error(
-        WaitingStatus("Waiting for enable functionalities"),
+        WaitingStatus("Waiting to enable Functionalities"),
         InvalidResponseError,
         K8sdConnectionError,
     )
     def _enable_functionalities(self):
         """Enable necessary components for the Kubernetes cluster."""
-        status.add(ops.MaintenanceStatus("Enabling Functionalities"))
-        log.info("Enabling Functionalities")
+        status.add(ops.MaintenanceStatus("Updating K8s Functionalities"))
+        log.info("Enabling K8s Functionalities")
         dns_config = DNSConfig(enabled=True)
         network_config = NetworkConfig(enabled=True)
         user_cluster_config = UserFacingClusterConfig(dns=dns_config, network=network_config)
@@ -495,10 +490,12 @@ def _update_status(self):
             status.add(ops.WaitingStatus("Preparing to leave cluster"))
             return
         if self.is_worker:
-            relation = self.model.get_relation("cluster")
-            assert relation, "Missing cluster relation with k8s"  # nosec
+            if not self.model.get_relation("cluster"):
+                status.add(ops.BlockedStatus("Missing cluster integration"))
+                assert False, "Missing cluster integration"  # nosec
         else:
             assert self.api_manager.is_cluster_ready(), "control-plane not yet ready"  # nosec
+
         if version := self._get_snap_version():
             self.unit.set_workload_version(version)
 
@@ -526,7 +523,7 @@ def _last_gasp(self, event):
         if not isinstance(event, ops.StopEvent):
             return
         busy_wait = 30
-        status.add(ops.MaintenanceStatus("Awaiting cluster removal"))
+        status.add(ops.MaintenanceStatus("Ensuring cluster removal"))
         while busy_wait and self.api_manager.is_cluster_bootstrapped():
             log.info("Waiting for this unit to uncluster")
             sleep(1)
@@ -535,7 +532,7 @@
     @status.on_error(ops.BlockedStatus("Cannot apply node-labels"), LabelMaker.NodeLabelError)
     def _apply_node_labels(self):
         """Apply labels to the node."""
-        status.add(ops.MaintenanceStatus("Apply Node Labels"))
+        status.add(ops.MaintenanceStatus("Ensuring Kubernetes Node Labels"))
         node = self.get_node_name()
         if self.labeler.active_labels() is not None:
             self.labeler.apply_node_labels()
@@ -551,7 +548,7 @@ def _on_update_status(self, _event: ops.UpdateStatusEvent):
             with status.context(self.unit):
                 self._update_status()
         except status.ReconcilerError:
-            log.exception("Can't to update_status")
+            log.exception("Can't update_status")
 
     @property
     def _internal_kubeconfig(self) -> Path:
@@ -561,7 +558,7 @@ def _internal_kubeconfig(self) -> Path:
     @on_error(ops.WaitingStatus(""))
     def _copy_internal_kubeconfig(self):
         """Write internal kubeconfig to /root/.kube/config."""
-        status.add(ops.MaintenanceStatus("Generating KubeConfig"))
+        status.add(ops.MaintenanceStatus("Regenerating KubeConfig"))
         KUBECONFIG.parent.mkdir(parents=True, exist_ok=True)
         KUBECONFIG.write_bytes(self._internal_kubeconfig.read_bytes())
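One caveat on the reworked worker branch in `_update_status`: `assert False` is now control flow, and Python strips asserts under `-O`, so in an optimized interpreter the early exit would silently vanish while the BlockedStatus is still recorded. Charms normally run unoptimized, so this is a nit rather than a bug; the standalone snippet below (all names local to the example) just demonstrates the `-O` behavior.

```python
# Self-contained demonstration that `assert False` is control flow which
# disappears under `python -O` (run this file with and without -O).
def guard(has_cluster_relation: bool) -> str:
    if not has_cluster_relation:
        # Under -O this assert is compiled out and execution falls through.
        assert False, "Missing cluster integration"  # nosec
        return "blocked"
    return "ready"


if __name__ == "__main__":
    optimized = not __debug__  # __debug__ is False when running with -O
    print(f"optimized={optimized}: guard(False) -> ", end="")
    try:
        print(guard(False))
    except AssertionError as err:
        print(f"AssertionError: {err}")
```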
diff --git a/charms/worker/k8s/tests/unit/test_base.py b/charms/worker/k8s/tests/unit/test_base.py
index 1317aa09..5dcef0f4 100644
--- a/charms/worker/k8s/tests/unit/test_base.py
+++ b/charms/worker/k8s/tests/unit/test_base.py
@@ -86,7 +86,10 @@ def test_update_status(harness):
     """
     harness.charm.reconciler.stored.reconciled = True  # Pretended to be reconciled
     harness.charm.on.update_status.emit()
-    assert harness.model.unit.status == ops.WaitingStatus("Cluster not yet ready")
+    if harness.charm.is_worker:
+        assert harness.model.unit.status == ops.BlockedStatus("Missing cluster integration")
+    else:
+        assert harness.model.unit.status == ops.WaitingStatus("Cluster not yet ready")
 
 
 def test_set_leader(harness):
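The new role-dependent assertion works, but the `if/else` inside `test_update_status` means a reader can't tell from the test list which expectation actually ran. A parametrized variant would make that explicit; the sketch below assumes the existing `harness` fixture and the attributes visible in this diff (`charm.is_worker`, `reconciler.stored.reconciled`), and the skip-based role selection is only one illustrative way to pair the fixture with the parameter, not the repo's actual fixture wiring.

```python
# Sketch: parametrized form of the same assertion. Assumes the existing
# harness fixture from conftest; the skip is illustrative role selection.
import ops
import pytest


@pytest.mark.parametrize(
    "is_worker, expected",
    [
        (True, ops.BlockedStatus("Missing cluster integration")),
        (False, ops.WaitingStatus("Cluster not yet ready")),
    ],
)
def test_update_status_by_role(harness, is_worker, expected):
    """update_status surfaces the status appropriate to the unit's role."""
    if harness.charm.is_worker != is_worker:
        pytest.skip("harness was built for the other role")
    harness.charm.reconciler.stored.reconciled = True  # Pretend to be reconciled
    harness.charm.on.update_status.emit()
    assert harness.model.unit.status == expected
```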