Merge branch 'main' into KU-2113/load-balancer-config
addyess authored Nov 21, 2024
2 parents 18bb14f + 6e2a0c8 commit f9703f4
Showing 20 changed files with 829 additions and 161 deletions.
1 change: 1 addition & 0 deletions .licenserc.yaml
@@ -19,6 +19,7 @@ header:
- 'charms/worker/k8s/lib/charms/k8s/**'
paths-ignore:
- 'charms/worker/k8s/lib/charms/**'
- 'tests/integration/data/*.tar.gz'
- '.github/**'
- '**/.gitkeep'
- '**/*.cfg'
1 change: 1 addition & 0 deletions charms/worker/build-snap-installation.sh
18 changes: 18 additions & 0 deletions charms/worker/charmcraft.yaml
@@ -57,6 +57,7 @@ bases:
- name: ubuntu
channel: "24.04"
architectures: [arm64]

config:
options:
labels:
@@ -68,6 +69,22 @@ config:
Note: Due to NodeRestriction, workers are limited in how they can label themselves
https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction
resources:
snap-installation:
type: file
filename: snap-installation.tar.gz
description: |
Override the charm-defined snap installation script.
This charm is designed to operate with a specific revision of snaps; overriding
this resource with anything else indicates that the charm is running an unsupported configuration.
Content Options:
0-byte resource (Default) -- Use the charm defined snap installation script
./snap-installation.yaml -- Overrides the charm defined snap-installation.yaml
./k8s_XXXX.snap -- Overrides the charm with a specific snap file installed dangerously
parts:
charm:
plugin: charm
@@ -97,6 +114,7 @@ peers:
provides:
cos-agent:
interface: cos_agent

requires:
aws:
interface: aws-integration
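Both the worker charm and the k8s charm below declare the same snap-installation resource. As a rough sketch of the three "Content Options" listed in the resource description above, the Python below shows one way an attached resource could be classified; the helper name detect_override and the classification logic are illustrative assumptions, not the charm's actual implementation (which lives in the charm's snap module and is not part of this diff).

# Illustrative sketch only -- classify the attached snap-installation resource
# into the three documented content options. Not the charm's real code path.
import tarfile
from pathlib import Path
from typing import Optional

import ops


def detect_override(charm: ops.CharmBase) -> Optional[str]:
    """Return "yaml", "snap", or None for the attached snap-installation resource."""
    path: Path = charm.model.resources.fetch("snap-installation")
    if path.stat().st_size == 0:
        return None  # 0-byte resource (Default): keep the charm-defined installation script
    with tarfile.open(path) as tar:
        names = tar.getnames()
    if any(name.endswith("snap-installation.yaml") for name in names):
        return "yaml"  # tarball overrides the charm-defined snap-installation.yaml
    if any(name.endswith(".snap") for name in names):
        return "snap"  # tarball carries a snap file to be installed dangerously
    return None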
7 changes: 7 additions & 0 deletions charms/worker/k8s/build-snap-installation.sh
@@ -0,0 +1,7 @@
#!/bin/bash
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

# Create an empty tarball to be used as a placeholder for the snap installation override
echo "Creating empty tarball at $1"
touch "${1}"
15 changes: 15 additions & 0 deletions charms/worker/k8s/charmcraft.yaml
@@ -238,6 +238,21 @@ config:
Port of the BGP peer for the load balancer. This is only used if
load-balancer-bgp-mode is set to true.
resources:
snap-installation:
type: file
filename: snap-installation.tar.gz
description: |
Override the charm-defined snap installation script.
This charm is designed to operate with a specific revision of snaps; overriding
this resource with anything else indicates that the charm is running an unsupported configuration.
Content Options:
0-byte resource (Default) -- Use the charm defined snap installation script
./snap-installation.yaml -- Overrides the charm defined snap-installation.yaml
./k8s_XXXX.snap -- Overrides the charm with a specific snap file installed dangerously
actions:
get-kubeconfig:
description: Retrieve Public Kubernetes cluster config, including credentials
47 changes: 12 additions & 35 deletions charms/worker/k8s/src/charm.py
@@ -31,7 +31,6 @@
import charms.operator_libs_linux.v2.snap as snap_lib
import containerd
import ops
import reschedule
import yaml
from charms.contextual_status import ReconcilerError, WaitingStatus, on_error
from charms.grafana_agent.v0.cos_agent import COSAgentProvider
@@ -59,6 +58,7 @@
from charms.reconciler import Reconciler
from cloud_integration import CloudIntegration
from cos_integration import COSIntegration
from events import update_status
from inspector import ClusterInspector
from kube_control import configure as configure_kube_control
from literals import DEPENDENCIES
@@ -144,7 +144,10 @@ def __init__(self, *args):
dependency_model=K8sDependenciesModel(**DEPENDENCIES),
)
self.cos = COSIntegration(self)
self.reconciler = Reconciler(self, self._reconcile)
self.update_status = update_status.Handler(self)
self.reconciler = Reconciler(
self, self._reconcile, exit_status=self.update_status.active_status
)
self.distributor = TokenDistributor(self, self.get_node_name(), self.api_manager)
self.collector = TokenCollector(self, self.get_node_name())
self.labeller = LabelMaker(
@@ -165,7 +168,6 @@ def __init__(self, *args):
],
)

self.framework.observe(self.on.update_status, self._on_update_status)
if self.is_control_plane:
self.etcd = EtcdReactiveRequires(self)
self.kube_control = KubeControlProvides(self, endpoint="kube-control")
@@ -287,7 +289,7 @@ def get_cloud_name(self) -> str:
def _install_snaps(self):
"""Install snap packages."""
status.add(ops.MaintenanceStatus("Ensuring snap installation"))
snap_management()
snap_management(self)

@on_error(WaitingStatus("Waiting to apply snap requirements"), subprocess.CalledProcessError)
def _apply_snap_requirements(self):
@@ -637,7 +639,8 @@ def _update_kubernetes_version(self):
if not relation:
status.add(ops.BlockedStatus("Missing cluster integration"))
raise ReconcilerError("Missing cluster integration")
if version := snap_version("k8s"):
version, _ = snap_version("k8s")
if version:
relation.data[self.unit]["version"] = version

@on_error(ops.WaitingStatus("Announcing Kubernetes version"))
@@ -650,7 +653,8 @@ def _announce_kubernetes_version(self):
ReconcilerError: If the k8s snap is not installed, the version is missing,
or the version does not match the local version.
"""
if not (local_version := snap_version("k8s")):
local_version, _ = snap_version("k8s")
if not local_version:
raise ReconcilerError("k8s-snap is not installed")

peer = self.model.get_relation("cluster")
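The two hunks above now unpack a pair from snap_version instead of testing a single return value. The snap helper itself is not included in this commit; the stub below only records the signature these call sites imply, and the name of the second element (overridden) is inferred from how events/update_status.py uses it.

# Signature implied by the updated callers; the real implementation lives in
# charms/worker/k8s/src/snap.py and is not shown in this commit.
from typing import Optional, Tuple


def version(snap: str) -> Tuple[Optional[str], bool]:
    """Return (installed_version, overridden) for the named snap.

    installed_version is None when the snap is not installed; overridden reports
    whether a snap-installation resource override is in effect.
    """
    raise NotImplementedError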
@@ -748,7 +752,7 @@ def _death_handler(self, event: ops.EventBase):
"""
if self.lead_control_plane:
self._revoke_cluster_tokens(event)
self._update_status()
self.update_status.run()
self._last_gasp()

relation = self.model.get_relation("cluster")
@@ -788,28 +792,12 @@ def _reconcile(self, event: ops.EventBase):
self._join_cluster(event)
self._config_containerd_registries()
self._configure_cos_integration()
self._update_status()
self.update_status.run()
self._apply_node_labels()
if self.is_control_plane:
self._copy_internal_kubeconfig()
self._expose_ports()

def _update_status(self):
"""Check k8s snap status."""
if version := snap_version("k8s"):
self.unit.set_workload_version(version)

if not self.get_cluster_name():
status.add(ops.WaitingStatus("Node not Clustered"))
return

trigger = reschedule.PeriodicEvent(self)
if not self._is_node_ready():
status.add(ops.WaitingStatus("Node not Ready"))
trigger.create(reschedule.Period(seconds=30))
return
trigger.cancel()

def _evaluate_removal(self, event: ops.EventBase) -> bool:
"""Determine if my unit is being removed.
Expand Down Expand Up @@ -905,17 +893,6 @@ def _apply_node_labels(self):
else:
log.info("Node %s not yet labelled", node)

def _on_update_status(self, _event: ops.UpdateStatusEvent):
"""Handle update-status event."""
if not self.reconciler.stored.reconciled:
return

try:
with status.context(self.unit):
self._update_status()
except status.ReconcilerError:
log.exception("Can't update_status")

def kubectl(self, *args) -> str:
"""Run kubectl command.
108 changes: 108 additions & 0 deletions charms/worker/k8s/src/events/update_status.py
@@ -0,0 +1,108 @@
#!/usr/bin/env python3

# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

# Learn more at: https://juju.is/docs/sdk

"""Update status handler for the k8s charm.
This handler is responsible for updating the unit's workload version and status
"""

import logging

import charms.contextual_status as status
import ops
import reschedule
from protocols import K8sCharmProtocol
from snap import version as snap_version

# Log messages can be retrieved using juju debug-log
log = logging.getLogger(__name__)


class DynamicActiveStatus(ops.ActiveStatus):
"""An ActiveStatus class that can be updated.
Attributes:
message (str): explanation of the unit status
prefix (str): Optional prefix to the unit status
postfix (str): Optional postfix to the unit status
"""

def __init__(self):
"""Initialise the DynamicActiveStatus."""
super().__init__("Ready")
self.prefix: str = ""
self.postfix: str = ""

@property
def message(self) -> str:
"""Return the message for the status."""
pre = f"{self.prefix} :" if self.prefix else ""
post = f" ({self.postfix})" if self.postfix else ""
return f"{pre}{self._message}{post}"

@message.setter
def message(self, message: str):
"""Set the message for the status.
Args:
message (str): explanation of the unit status
"""
self._message = message


class Handler(ops.Object):
"""Handler for the update-status event in a Kubernetes operator.
This class observes the `update_status` event and handles it by checking the
Kubernetes snap status and updating the unit's workload version accordingly.
Attributes:
charm (CharmBase): The charm instance that this handler is associated with.
active_status (DynamicActiveStatus): The active status object used to manage
the unit's status during the update process.
"""

def __init__(self, charm: K8sCharmProtocol):
"""Initialize the UpdateStatusEvent.
Args:
charm: The charm instance that is instantiating this event.
"""
super().__init__(charm, "update_status")
self.charm = charm
self.active_status = DynamicActiveStatus()
self.charm.framework.observe(self.charm.on.update_status, self._on_update_status)

def _on_update_status(self, _event: ops.UpdateStatusEvent):
"""Handle update-status event."""
if not self.charm.reconciler.stored.reconciled:
return

try:
with status.context(self.charm.unit, exit_status=self.active_status):
self.run()
except status.ReconcilerError:
log.exception("Can't update_status")

def run(self):
"""Check k8s snap status."""
version, overridden = snap_version("k8s")
if version:
self.charm.unit.set_workload_version(version)

self.active_status.postfix = "Snap Override Active" if overridden else ""

if not self.charm.get_cluster_name():
status.add(ops.WaitingStatus("Node not Clustered"))
return

trigger = reschedule.PeriodicEvent(self.charm)
if not self.charm._is_node_ready():
status.add(ops.WaitingStatus("Node not Ready"))
trigger.create(reschedule.Period(seconds=30))
return
trigger.cancel()
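For illustration, the snippet below shows how DynamicActiveStatus composes the unit status that Handler.run() produces when a snap override is active; it assumes the charm's src directory is importable and is not part of the commit.

# Example only: message composition of the DynamicActiveStatus defined above.
from events.update_status import DynamicActiveStatus

status = DynamicActiveStatus()            # message starts as "Ready"
status.postfix = "Snap Override Active"   # what Handler.run() sets when overridden is True
print(status.message)                     # -> "Ready (Snap Override Active)"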
11 changes: 11 additions & 0 deletions charms/worker/k8s/src/protocols.py
@@ -6,6 +6,7 @@
import ops
from charms.interface_external_cloud_provider import ExternalCloudProvider
from charms.k8s.v0.k8sd_api_manager import K8sdAPIManager
from charms.reconciler import Reconciler
from ops.interface_kube_control import KubeControlProvides


@@ -16,11 +17,13 @@ class K8sCharmProtocol(ops.CharmBase):
api_manager (K8sdAPIManager): The API manager for the charm.
kube_control (KubeControlProvides): The kube-control interface.
xcp (ExternalCloudProvider): The external cloud provider interface.
reconciler (Reconciler): The reconciler for the charm
"""

api_manager: K8sdAPIManager
kube_control: KubeControlProvides
xcp: ExternalCloudProvider
reconciler: Reconciler

def get_cluster_name(self) -> str:
"""Get the cluster name.
@@ -37,3 +40,11 @@ def get_cloud_name(self) -> str:
NotImplementedError: If the method is not implemented.
"""
raise NotImplementedError

def _is_node_ready(self) -> bool:
"""Check if the node is ready.
Raises:
NotImplementedError: If the method is not implemented.
"""
raise NotImplementedError
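The new reconciler attribute and _is_node_ready stub let code typed against K8sCharmProtocol, such as events/update_status.py, use these members without importing the concrete charm class. A minimal hypothetical example (the helper name is invented for illustration):

# Hypothetical helper, illustrating why the protocol now exposes these members.
from protocols import K8sCharmProtocol


def needs_reschedule(charm: K8sCharmProtocol) -> bool:
    """True when the unit has reconciled but its node is not yet Ready."""
    return charm.reconciler.stored.reconciled and not charm._is_node_ready()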