From b4a17c50047f42113f4b754db8e1c2d1a4caca9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Berkay=20Tekin=20=C3=96z?= Date: Tue, 3 Dec 2024 21:48:38 +0300 Subject: [PATCH 1/7] Rename annotations to cluster-annotations (#198) * Rename annotations to cluster-annotations --------- Co-authored-by: Adam Dyess --- charms/worker/k8s/charmcraft.yaml | 4 ++-- charms/worker/k8s/src/charm.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charms/worker/k8s/charmcraft.yaml b/charms/worker/k8s/charmcraft.yaml index 5b3b2de9..12ee9051 100644 --- a/charms/worker/k8s/charmcraft.yaml +++ b/charms/worker/k8s/charmcraft.yaml @@ -68,11 +68,11 @@ bases: architectures: [arm64] config: options: - annotations: + cluster-annotations: type: string default: "" description: | - Annotations is a space separated list of (key/value) pairs) that can be + Space-separated list of (key/value) pairs) that can be used to add arbitrary metadata configuration to the Canonical Kubernetes cluster. For more information, see the upstream Canonical Kubernetes documentation about annotations: diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index 03a78b7b..3e5310be 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -391,7 +391,7 @@ def _configure_cos_integration(self): self.collector.request(relation) def _get_valid_annotations(self) -> Optional[dict]: - """Fetch and validate annotations from charm configuration. + """Fetch and validate cluster-annotations from charm configuration. The values are expected to be a space-separated string of key-value pairs. @@ -401,7 +401,7 @@ def _get_valid_annotations(self) -> Optional[dict]: Raises: ReconcilerError: If any annotation is invalid. """ - raw_annotations = self.config.get("annotations") + raw_annotations = self.config.get("cluster-annotations") if not raw_annotations: return None From 5d6af4b6f05b4d1ee3926df39433f1a898d1c1b0 Mon Sep 17 00:00:00 2001 From: Mateo Florido Date: Wed, 4 Dec 2024 09:08:12 -0500 Subject: [PATCH 2/7] Implement `upgrade-relation` for control plane nodes (#200) This commit implements the upgrades for control plane nodes --- charms/worker/k8s/requirements.txt | 1 + charms/worker/k8s/src/charm.py | 48 +++++- charms/worker/k8s/src/events/update_status.py | 8 +- charms/worker/k8s/src/literals.py | 23 ++- charms/worker/k8s/src/protocols.py | 28 +++ charms/worker/k8s/src/snap.py | 49 +++++- charms/worker/k8s/src/upgrade.py | 159 +++++++++++++++++- .../k8s/templates/snap_installation.yaml | 2 +- charms/worker/k8s/tests/unit/test_upgrade.py | 31 +++- 9 files changed, 324 insertions(+), 25 deletions(-) diff --git a/charms/worker/k8s/requirements.txt b/charms/worker/k8s/requirements.txt index 3c639667..140861f3 100644 --- a/charms/worker/k8s/requirements.txt +++ b/charms/worker/k8s/requirements.txt @@ -16,3 +16,4 @@ typing_extensions==4.12.2 websocket-client==1.8.0 poetry-core==1.9.1 lightkube==0.15.5 +httpx==0.27.2 diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index 3e5310be..a3d5206a 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -21,10 +21,11 @@ import shlex import socket import subprocess +from collections import defaultdict from functools import cached_property from pathlib import Path from time import sleep -from typing import Dict, Optional, Union +from typing import Dict, List, Optional, Union from urllib.parse import urlparse import charms.contextual_status as status @@ -123,6 +124,8 @@ class K8sCharm(ops.CharmBase): 
is_worker: true if this is a worker unit is_control_plane: true if this is a control-plane unit lead_control_plane: true if this is a control-plane unit and its the leader + is_upgrade_granted: true if the upgrade has been granted + datastore: the datastore used for Kubernetes """ _stored = ops.StoredState() @@ -139,16 +142,16 @@ def __init__(self, *args): xcp_relation = "external-cloud-provider" if self.is_control_plane else "" self.cloud_integration = CloudIntegration(self, self.is_control_plane) self.xcp = ExternalCloudProvider(self, xcp_relation) - self.cluster_inspector = ClusterInspector(kubeconfig_path=KUBECONFIG) + self.cluster_inspector = ClusterInspector(kubeconfig_path=self._internal_kubeconfig) self.upgrade = K8sUpgrade( self, - node_manager=self.cluster_inspector, + cluster_inspector=self.cluster_inspector, relation_name="upgrade", substrate="vm", dependency_model=K8sDependenciesModel(**DEPENDENCIES), ) self.cos = COSIntegration(self) - self.update_status = update_status.Handler(self) + self.update_status = update_status.Handler(self, self.upgrade) self.reconciler = Reconciler( self, self._reconcile, exit_status=self.update_status.active_status ) @@ -161,7 +164,8 @@ def __init__(self, *args): user_label_key="node-labels", timeout=15, ) - self._stored.set_default(is_dying=False, cluster_name=str()) + self._upgrade_snap = False + self._stored.set_default(is_dying=False, cluster_name=str(), upgrade_granted=False) self.cos_agent = COSAgentProvider( self, @@ -227,6 +231,35 @@ def is_worker(self) -> bool: """Returns true if the unit is a worker.""" return self.meta.name == "k8s-worker" + @property + def datastore(self) -> str: + """Return the datastore type.""" + return str(self.config.get("bootstrap-datastore")) + + def get_worker_versions(self) -> Dict[str, List[ops.Unit]]: + """Get the versions of the worker units. + + Returns: + Dict[str, List[ops.Unit]]: A dictionary of versions and the units that have them. 
+ """ + if not (relation := self.model.get_relation("k8s-cluster")): + return {} + + versions = defaultdict(list) + for unit in relation.units: + if version := relation.data[unit].get("version"): + versions[version].append(unit) + return versions + + def grant_upgrade(self): + """Grant the upgrade to the charm.""" + self._upgrade_snap = True + + @property + def is_upgrade_granted(self) -> bool: + """Check if the upgrade has been granted.""" + return self._upgrade_snap + def _apply_proxy_environment(self): """Apply the proxy settings from environment variables.""" proxy_settings = self._get_proxy_env() @@ -694,8 +727,9 @@ def _announce_kubernetes_version(self): if not unit_version: raise ReconcilerError(f"Waiting for version from {unit.name}") if unit_version != local_version: - status.add(ops.BlockedStatus(f"Version mismatch with {unit.name}")) - raise ReconcilerError(f"Version mismatch with {unit.name}") + # NOTE: Add a check to validate if we are doing an upgrade + status.add(ops.WaitingStatus("Upgrading the cluster")) + return relation.data[self.app]["version"] = local_version def _get_proxy_env(self) -> Dict[str, str]: diff --git a/charms/worker/k8s/src/events/update_status.py b/charms/worker/k8s/src/events/update_status.py index 9f67c1c1..6a480846 100644 --- a/charms/worker/k8s/src/events/update_status.py +++ b/charms/worker/k8s/src/events/update_status.py @@ -15,6 +15,7 @@ import reschedule from protocols import K8sCharmProtocol from snap import version as snap_version +from upgrade import K8sUpgrade # Log messages can be retrieved using juju debug-log log = logging.getLogger(__name__) @@ -64,24 +65,27 @@ class Handler(ops.Object): the unit's status during the update process. """ - def __init__(self, charm: K8sCharmProtocol): + def __init__(self, charm: K8sCharmProtocol, upgrade: K8sUpgrade): """Initialize the UpdateStatusEvent. Args: charm: The charm instance that is instantiating this event. + upgrade: The upgrade instance that handles the upgrade process. """ super().__init__(charm, "update_status") self.charm = charm + self.upgrade = upgrade self.active_status = DynamicActiveStatus() self.charm.framework.observe(self.charm.on.update_status, self._on_update_status) - def _on_update_status(self, _event: ops.UpdateStatusEvent): + def _on_update_status(self, event: ops.UpdateStatusEvent): """Handle update-status event.""" if not self.charm.reconciler.stored.reconciled: return try: with status.context(self.charm.unit, exit_status=self.active_status): + self.upgrade.set_upgrade_status(event) self.run() except status.ReconcilerError: log.exception("Can't update_status") diff --git a/charms/worker/k8s/src/literals.py b/charms/worker/k8s/src/literals.py index 950b9edd..2b102d4d 100644 --- a/charms/worker/k8s/src/literals.py +++ b/charms/worker/k8s/src/literals.py @@ -3,6 +3,27 @@ """Literals for the charm.""" +SNAP_NAME = "k8s" + +K8S_COMMON_SERVICES = [ + "kubelet", + "kube-proxy", + "k8sd", +] + +K8S_DQLITE_SERVICE = "k8s-dqlite" + +K8S_CONTROL_PLANE_SERVICES = [ + "kube-apiserver", + K8S_DQLITE_SERVICE, + "kube-controller-manager", + "kube-scheduler", +] + +K8S_WORKER_SERVICES = [ + "k8s-apiserver-proxy", +] + DEPENDENCIES = { # NOTE: Update the dependencies for the k8s-charm before releasing. 
"k8s_charm": { @@ -16,6 +37,6 @@ "dependencies": {"k8s-worker": "^1.30, < 1.32"}, "name": "k8s", "upgrade_supported": "^1.30, < 1.32", - "version": "1.31.2", + "version": "1.31.3", }, } diff --git a/charms/worker/k8s/src/protocols.py b/charms/worker/k8s/src/protocols.py index 0ca5fc64..dbf97318 100644 --- a/charms/worker/k8s/src/protocols.py +++ b/charms/worker/k8s/src/protocols.py @@ -3,6 +3,8 @@ """Protocol definitions module.""" +from typing import Dict, List + import ops from charms.interface_external_cloud_provider import ExternalCloudProvider from charms.k8s.v0.k8sd_api_manager import K8sdAPIManager @@ -18,12 +20,22 @@ class K8sCharmProtocol(ops.CharmBase): kube_control (KubeControlProvides): The kube-control interface. xcp (ExternalCloudProvider): The external cloud provider interface. reconciler (Reconciler): The reconciler for the charm + is_upgrade_granted (bool): Whether the upgrade is granted. + lead_control_plane (bool): Whether the charm is the lead control plane. + is_control_plane (bool): Whether the charm is a control plane. + is_worker (bool): Whether the charm is a worker. + datastore (str): The datastore for Kubernetes. """ api_manager: K8sdAPIManager kube_control: KubeControlProvides xcp: ExternalCloudProvider reconciler: Reconciler + is_upgrade_granted: bool + lead_control_plane: bool + is_control_plane: bool + is_worker: bool + datastore: str def get_cluster_name(self) -> str: """Get the cluster name. @@ -33,6 +45,14 @@ def get_cluster_name(self) -> str: """ raise NotImplementedError + def grant_upgrade(self) -> None: + """Grant the upgrade. + + Raises: + NotImplementedError: If the method is not implemented. + """ + raise NotImplementedError + def get_cloud_name(self) -> str: """Get the cloud name. @@ -48,3 +68,11 @@ def _is_node_ready(self) -> bool: NotImplementedError: If the method is not implemented. """ raise NotImplementedError + + def get_worker_versions(self) -> Dict[str, List[ops.Unit]]: + """Get the worker versions. + + Raises: + NotImplementedError: If the method is not implemented. + """ + raise NotImplementedError diff --git a/charms/worker/k8s/src/snap.py b/charms/worker/k8s/src/snap.py index e78291b4..0f19a7c5 100644 --- a/charms/worker/k8s/src/snap.py +++ b/charms/worker/k8s/src/snap.py @@ -19,6 +19,7 @@ import charms.operator_libs_linux.v2.snap as snap_lib import ops import yaml +from protocols import K8sCharmProtocol from pydantic import BaseModel, Field, ValidationError, parse_obj_as, validator from typing_extensions import Annotated @@ -263,7 +264,7 @@ def _parse_management_arguments(charm: ops.CharmBase) -> List[SnapArgument]: return args -def management(charm: ops.CharmBase) -> None: +def management(charm: K8sCharmProtocol) -> None: """Manage snap installations on this machine. Arguments: @@ -272,7 +273,7 @@ def management(charm: ops.CharmBase) -> None: cache = snap_lib.SnapCache() for args in _parse_management_arguments(charm): which = cache[args.name] - if block_refresh(which, args): + if block_refresh(which, args, charm.is_upgrade_granted): continue install_args = args.dict(exclude_none=True) if isinstance(args, SnapFileArgument) and which.revision != "x1": @@ -287,12 +288,13 @@ def management(charm: ops.CharmBase) -> None: which.ensure(**install_args) -def block_refresh(which: snap_lib.Snap, args: SnapArgument) -> bool: +def block_refresh(which: snap_lib.Snap, args: SnapArgument, upgrade_granted: bool = False) -> bool: """Block snap refreshes if the snap is in a specific state. 
Arguments: which: The snap to check args: The snap arguments + upgrade_granted: If the upgrade is granted Returns: bool: True if the snap should be blocked from refreshing @@ -303,6 +305,9 @@ def block_refresh(which: snap_lib.Snap, args: SnapArgument) -> bool: if _overridden_snap_installation().exists(): log.info("Allowing %s snap refresh due to snap installation override", args.name) return False + if upgrade_granted: + log.info("Allowing %s snap refresh due to upgrade-granted", args.name) + return False if isinstance(args, SnapStoreArgument) and args.revision: if block := which.revision != args.revision: log.info("Blocking %s snap refresh to revision=%s", args.name, args.revision) @@ -342,3 +347,41 @@ def version(snap: str) -> Tuple[Optional[str], bool]: log.info("Snap k8s not found or no version available.") return None, overridden + + +def stop(snap_name: str, services: List[str]) -> None: + """Stop the services of the snap on this machine. + + Arguments: + snap_name: The name of the snap + services: The services to stop + + Raises: + SnapError: If the snap isn't installed + """ + cache = snap_lib.SnapCache() + if snap_name not in cache: + message = f"Snap '{snap_name}' not installed" + log.error(message) + raise snap_lib.SnapError(message) + snap = cache[snap_name] + snap.stop(services=services) + + +def start(snap_name: str, services: List[str]) -> None: + """Start the services of the snap on this machine. + + Arguments: + snap_name: The name of the snap + services: The services to start + + Raises: + SnapError: If the snap isn't installed + """ + cache = snap_lib.SnapCache() + if snap_name not in cache: + message = f"Snap '{snap_name}' not installed" + log.error(message) + raise snap_lib.SnapError(message) + snap = cache[snap_name] + snap.start(services=services) diff --git a/charms/worker/k8s/src/upgrade.py b/charms/worker/k8s/src/upgrade.py index 4c8efef1..9038e262 100644 --- a/charms/worker/k8s/src/upgrade.py +++ b/charms/worker/k8s/src/upgrade.py @@ -4,11 +4,32 @@ """A module for upgrading the k8s and k8s-worker charms.""" import logging -from typing import List - -from charms.data_platform_libs.v0.upgrade import ClusterNotReadyError, DataUpgrade, DependencyModel +from typing import List, Union + +import charms.contextual_status as status +import ops +import reschedule +from charms.data_platform_libs.v0.upgrade import ( + ClusterNotReadyError, + DataUpgrade, + DependencyModel, + UpgradeGrantedEvent, + verify_requirements, +) +from charms.operator_libs_linux.v2.snap import SnapError from inspector import ClusterInspector +from literals import ( + K8S_COMMON_SERVICES, + K8S_CONTROL_PLANE_SERVICES, + K8S_DQLITE_SERVICE, + K8S_WORKER_SERVICES, + SNAP_NAME, +) +from protocols import K8sCharmProtocol from pydantic import BaseModel +from snap import management as snap_management +from snap import start, stop +from snap import version as snap_version log = logging.getLogger(__name__) @@ -28,17 +49,36 @@ class K8sDependenciesModel(BaseModel): class K8sUpgrade(DataUpgrade): """A helper class for upgrading the k8s and k8s-worker charms.""" - def __init__(self, charm, node_manager: ClusterInspector, **kwargs): + def __init__(self, charm: K8sCharmProtocol, cluster_inspector: ClusterInspector, **kwargs): """Initialize the K8sUpgrade. Args: charm: The charm instance. - node_manager: The ClusterInspector instance. + cluster_inspector: The ClusterInspector instance. kwargs: Additional keyword arguments. 
""" super().__init__(charm, **kwargs) self.charm = charm - self.node_manager = node_manager + self.cluster_inspector = cluster_inspector + + def set_upgrade_status(self, event: ops.UpdateStatusEvent) -> None: + """Set the Juju upgrade status. + + Args: + event: The UpdateStatusEvent instance. + """ + upgrade_status = self.state + if not upgrade_status: + return + if upgrade_status == "upgrading": + if not self.charm.is_upgrade_granted: + self._upgrade(event) + elif upgrade_status == "recovery": + status.add(ops.MaintenanceStatus("Charm is in recovery mode. Please check the logs.")) + return + elif upgrade_status == "failed": + status.add(ops.BlockedStatus("Upgrade Failed. Please check the logs.")) + return def pre_upgrade_check(self) -> None: """Check if the cluster is ready for an upgrade. @@ -50,9 +90,10 @@ def pre_upgrade_check(self) -> None: ClusterNotReadyError: If the cluster is not ready for an upgrade. """ try: - nodes = self.node_manager.get_nodes( + nodes = self.cluster_inspector.get_nodes( labels={"juju-charm": "k8s-worker" if self.charm.is_worker else "k8s"} ) + failing_pods = self.cluster_inspector.verify_pods_running(["kube-system"]) except ClusterInspector.ClusterInspectorError as e: raise ClusterNotReadyError( message="Cluster is not ready for an upgrade", @@ -71,13 +112,115 @@ def pre_upgrade_check(self) -> None: Please check the node(s) for more information.""", ) - if failing_pods := self.node_manager.verify_pods_running(["kube-system"]): + if failing_pods: raise ClusterNotReadyError( message="Cluster is not ready", cause=f"Pods not running in namespace(s): {failing_pods}", resolution="Check the logs for the failing pods.", ) + def _verify_worker_versions(self) -> bool: + """Verify that the k8s-worker charm versions meet the requirements. + + This method verifies that all applications related to the cluster relation + satisfy the requirements of the k8s-worker charm. + + Returns: + bool: True if all worker versions meet the requirements, False otherwise. + """ + worker_versions = self.charm.get_worker_versions() + if not worker_versions: + return True + dependency_model: DependencyModel = getattr(self.dependency_model, "k8s_service") + + incompatible = { + version: units + for version, units in worker_versions.items() + if not verify_requirements( + version=version, requirement=dependency_model.dependencies["k8s-worker"] + ) + } + + if incompatible: + units_str = "\n".join( + f"[{v}]: {', '.join(u.name for u in units)}" for v, units in incompatible.items() + ) + log.error( + "k8s worker charm version requirements not met. Incompatible units: %s", units_str + ) + return False + + return True + + def _perform_upgrade(self, services: List[str]) -> None: + """Perform the upgrade. + + Args: + services: The services to stop and start during the upgrade. + """ + status.add(ops.MaintenanceStatus("Stopping the K8s services")) + stop(SNAP_NAME, services) + status.add(ops.MaintenanceStatus("Upgrading the k8s snap.")) + snap_management(self.charm) + status.add(ops.MaintenanceStatus("Starting the K8s services")) + start(SNAP_NAME, services) + + def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: + """Handle the upgrade granted event. + + Args: + event: The UpgradeGrantedEvent instance. 
+ """ + with status.context(self.charm.unit, exit_status=ops.ActiveStatus("Ready")): + self._upgrade(event) + + def _upgrade(self, event: Union[ops.EventBase, ops.HookEvent]) -> None: + """Upgrade the snap workload.""" + trigger = reschedule.PeriodicEvent(self.charm) + current_version, _ = snap_version("k8s") + + status.add(ops.MaintenanceStatus("Verifying the cluster is ready for an upgrade.")) + if not current_version: + log.error("Failed to get the version of the k8s snap.") + self.set_unit_failed(cause="Failed to get the version of the k8s snap.") + status.add(ops.BlockedStatus("Failed to get the version of the k8s snap.")) + return + + status.add(ops.MaintenanceStatus("Upgrading the charm.")) + + if self.charm.lead_control_plane: + if not self._verify_worker_versions(): + self.set_unit_failed( + cause="The k8s worker charm version does not meet the requirements." + ) + trigger.cancel() + return + + self.charm.grant_upgrade() + + services = ( + K8S_CONTROL_PLANE_SERVICES + K8S_COMMON_SERVICES + if self.charm.is_control_plane + else K8S_COMMON_SERVICES + K8S_WORKER_SERVICES + ) + + if K8S_DQLITE_SERVICE in services and self.charm.datastore == "dqlite": + services.remove(K8S_DQLITE_SERVICE) + + try: + self._perform_upgrade(services=services) + self.set_unit_completed() + + if self.charm.unit.is_leader(): + self.on_upgrade_changed(event) + + trigger.cancel() + except SnapError: + status.add(ops.WaitingStatus("Waiting for the snap to be installed.")) + log.exception("Failed to upgrade the snap. Will retry...") + trigger.create(reschedule.Period(seconds=30)) + return + def build_upgrade_stack(self) -> List[int]: """Return a list of unit numbers to upgrade in order. diff --git a/charms/worker/k8s/templates/snap_installation.yaml b/charms/worker/k8s/templates/snap_installation.yaml index f8528062..828d2da3 100644 --- a/charms/worker/k8s/templates/snap_installation.yaml +++ b/charms/worker/k8s/templates/snap_installation.yaml @@ -10,4 +10,4 @@ arm64: - name: k8s install-type: store channel: edge - classic: true \ No newline at end of file + classic: true diff --git a/charms/worker/k8s/tests/unit/test_upgrade.py b/charms/worker/k8s/tests/unit/test_upgrade.py index 60e66bb4..e55f214c 100644 --- a/charms/worker/k8s/tests/unit/test_upgrade.py +++ b/charms/worker/k8s/tests/unit/test_upgrade.py @@ -6,6 +6,7 @@ import unittest from unittest.mock import MagicMock +import ops from charms.data_platform_libs.v0.upgrade import ClusterNotReadyError from inspector import ClusterInspector from lightkube.models.core_v1 import Node @@ -22,7 +23,7 @@ def setUp(self): self.node_manager = MagicMock(spec=ClusterInspector) self.upgrade = K8sUpgrade( self.charm, - node_manager=self.node_manager, + cluster_inspector=self.node_manager, relation_name="upgrade", substrate="vm", dependency_model=K8sDependenciesModel( @@ -34,9 +35,9 @@ def setUp(self): "version": "100", }, "k8s_service": { - "dependencies": {"k8s-worker": "^3"}, + "dependencies": {"k8s-worker": "^1.30, < 1.32"}, "name": "k8s", - "upgrade_supported": ">=0.8", + "upgrade_supported": "^1.30, < 1.32", "version": "1.31.1", }, } @@ -119,3 +120,27 @@ def test_build_upgrade_stack_with_relation(self): self.assertEqual(sorted(result), [0, 1, 2]) self.charm.model.get_relation.assert_called_once_with("cluster") + + def test_verify_worker_versions_compatible(self): + """Test _verify_worker_versions returns True when worker versions is compatible.""" + unit_1 = MagicMock(spec=ops.Unit) + unit_1.name = "k8s-worker/0" + unit_2 = MagicMock(spec=ops.Unit) + unit_2.name = 
"k8s-worker/1" + self.charm.get_worker_versions.return_value = {"1.31.0": [unit_1], "1.31.5": [unit_2]} + + result = self.upgrade._verify_worker_versions() + + self.assertTrue(result) + + def test_verify_worker_versions_incompatible(self): + """Test _verify_worker_versions returns False when worker versions is incompatible.""" + unit_1 = MagicMock(spec=ops.Unit) + unit_1.name = "k8s-worker/0" + unit_2 = MagicMock(spec=ops.Unit) + unit_2.name = "k8s-worker/1" + self.charm.get_worker_versions.return_value = {"1.32.0": [unit_1], "1.33.0": [unit_2]} + + result = self.upgrade._verify_worker_versions() + + self.assertFalse(result) From 633abdc6dc77838d65f4d637cc72ff5352aca9c0 Mon Sep 17 00:00:00 2001 From: Mateo Florido Date: Wed, 4 Dec 2024 15:10:50 -0500 Subject: [PATCH 3/7] Pin Runner Size/Arch (#203) Pin Test Runner Label --------- Co-authored-by: Adam Dyess --- .github/workflows/integration_test.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index bec1cbac..695a0895 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -39,8 +39,8 @@ jobs: strategy: matrix: arch: - - {id: amd64, builder-label: ubuntu-22.04, tester-arch: x64} - - {id: arm64, builder-label: ARM64, tester-arch: ARM64} + - {id: amd64, builder-label: self-hosted-linux-amd64-jammy-large, tester-arch: AMD64, tester-size: large} + - {id: arm64, builder-label: self-hosted-linux-arm64-jammy-large, tester-arch: ARM64, tester-size: large} suite: [k8s, etcd, ceph] exclude: - {arch: {id: arm64}, suite: ceph} @@ -56,6 +56,7 @@ jobs: provider: lxd self-hosted-runner: true self-hosted-runner-arch: ${{ matrix.arch.tester-arch }} + self-hosted-runner-label: ${{ matrix.arch.tester-size }} test-timeout: 120 test-tox-env: integration-${{ matrix.suite }} trivy-fs-enabled: false From 3175f43368e41ad6cd6f64f8232d488a73b602f6 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Wed, 4 Dec 2024 20:48:30 -0600 Subject: [PATCH 4/7] Adjust workflow pinning again (#210) --- .github/workflows/integration_test.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 695a0895..37f6f7d6 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -39,8 +39,8 @@ jobs: strategy: matrix: arch: - - {id: amd64, builder-label: self-hosted-linux-amd64-jammy-large, tester-arch: AMD64, tester-size: large} - - {id: arm64, builder-label: self-hosted-linux-arm64-jammy-large, tester-arch: ARM64, tester-size: large} + - {id: amd64, builder-label: ubuntu-22.04, tester-arch: AMD64} # built on azure + - {id: arm64, builder-label: ARM64, tester-arch: ARM64} # built on self-hosted suite: [k8s, etcd, ceph] exclude: - {arch: {id: arm64}, suite: ceph} @@ -56,7 +56,7 @@ jobs: provider: lxd self-hosted-runner: true self-hosted-runner-arch: ${{ matrix.arch.tester-arch }} - self-hosted-runner-label: ${{ matrix.arch.tester-size }} + self-hosted-runner-label: large test-timeout: 120 test-tox-env: integration-${{ matrix.suite }} trivy-fs-enabled: false From 903e1a710c935a2e598d68c33a966258ac7382a3 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Thu, 5 Dec 2024 07:22:30 +0200 Subject: [PATCH 5/7] Add kube-apiserver-extra-sans option (#201) We're adding a config option that allows specifying additional kube-apiserver Subject Alternative Names. 
Co-authored-by: Adam Dyess --- charms/worker/k8s/charmcraft.yaml | 11 ++++++++++ charms/worker/k8s/src/charm.py | 11 ++++++++-- charms/worker/k8s/tests/unit/test_base.py | 25 +++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/charms/worker/k8s/charmcraft.yaml b/charms/worker/k8s/charmcraft.yaml index 12ee9051..cf24b0bb 100644 --- a/charms/worker/k8s/charmcraft.yaml +++ b/charms/worker/k8s/charmcraft.yaml @@ -224,6 +224,17 @@ config: runtime-config=batch/v2alpha1=true profiling=true will result in kube-apiserver being run with the following options: --runtime-config=batch/v2alpha1=true --profiling=true + kube-apiserver-extra-sans: + type: string + default: "" + description: | + Space separated list of extra Subject Alternative Names for the kube-apiserver + self-signed certificates. + + Examples: + - "kubernetes" + - "kubernetes.default.svc" + - "kubernetes.default.svc.cluster.local" kube-controller-manager-extra-args: type: string default: "" diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index a3d5206a..1a2af243 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -350,6 +350,13 @@ def _check_k8sd_ready(self): status.add(ops.MaintenanceStatus("Ensuring snap readiness")) self.api_manager.check_k8sd_ready() + def _get_extra_sans(self): + """Retrieve the certificate extra SANs.""" + extra_sans_str = str(self.config.get("kube-apiserver-extra-sans") or "") + configured_sans = {san for san in extra_sans_str.strip().split() if san} + all_sans = configured_sans | set([_get_public_address()]) + return sorted(all_sans) + def _assemble_bootstrap_config(self): """Assemble the bootstrap configuration for the Kubernetes cluster. @@ -362,7 +369,7 @@ def _assemble_bootstrap_config(self): bootstrap_config.service_cidr = str(self.config["bootstrap-service-cidr"]) bootstrap_config.pod_cidr = str(self.config["bootstrap-pod-cidr"]) bootstrap_config.control_plane_taints = str(self.config["bootstrap-node-taints"]).split() - bootstrap_config.extra_sans = [_get_public_address()] + bootstrap_config.extra_sans = self._get_extra_sans() cluster_name = self.get_cluster_name() config.extra_args.craft(self.config, bootstrap_config, cluster_name) return bootstrap_config @@ -795,7 +802,7 @@ def _join_with_token(self, relation: ops.Relation, token: str, cluster_name: str request = JoinClusterRequest(name=node_name, address=cluster_addr, token=token) if self.is_control_plane: request.config = ControlPlaneNodeJoinConfig() - request.config.extra_sans = [_get_public_address()] + request.config.extra_sans = self._get_extra_sans() config.extra_args.craft(self.config, request.config, cluster_name) else: request.config = NodeJoinConfig() diff --git a/charms/worker/k8s/tests/unit/test_base.py b/charms/worker/k8s/tests/unit/test_base.py index 6a3f419a..a0ee2daa 100644 --- a/charms/worker/k8s/tests/unit/test_base.py +++ b/charms/worker/k8s/tests/unit/test_base.py @@ -195,3 +195,28 @@ def test_configure_datastore_runtime_config_etcd(harness): assert uccr_config.datastore.client_key == "" assert uccr_config.datastore.servers == ["foo:1234", "bar:1234"] assert uccr_config.datastore.type == "external" + + +def test_configure_boostrap_extra_sans(harness): + """Test configuring kube-apiserver-extra-sans on bootstrap. 
+ + Args: + harness: the harness under test + """ + if harness.charm.is_worker: + pytest.skip("Not applicable on workers") + + cfg_extra_sans = ["mykubernetes", "mykubernetes.local"] + public_addr = "11.12.13.14" + harness.update_config({"kube-apiserver-extra-sans": " ".join(cfg_extra_sans)}) + + with mock.patch("charm._get_public_address") as mock_get_public_addr: + mock_get_public_addr.return_value = public_addr + + bs_config = harness.charm._assemble_bootstrap_config() + + # We expect the resulting SANs to include the configured addresses as well + # as the unit address. + exp_extra_sans = cfg_extra_sans + [public_addr] + for san in exp_extra_sans: + assert san in bs_config.extra_sans From 0b7a3771b77384aefa29ea0e3c27c514e09b867a Mon Sep 17 00:00:00 2001 From: Mateo Florido Date: Thu, 5 Dec 2024 14:05:19 -0500 Subject: [PATCH 6/7] Replace Magic Strings with Literals (#208) --- charms/worker/k8s/src/charm.py | 67 ++++++++++--------- charms/worker/k8s/src/literals.py | 27 ++++++++ charms/worker/k8s/src/upgrade.py | 10 ++- .../k8s/tests/unit/test_token_distributor.py | 11 +-- charms/worker/k8s/tests/unit/test_upgrade.py | 5 +- 5 files changed, 77 insertions(+), 43 deletions(-) diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index 1a2af243..fc438a8b 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -66,7 +66,22 @@ from events import update_status from inspector import ClusterInspector from kube_control import configure as configure_kube_control -from literals import DEPENDENCIES +from literals import ( + CLUSTER_RELATION, + CLUSTER_WORKER_RELATION, + CONTAINERD_RELATION, + COS_RELATION, + COS_TOKENS_RELATION, + COS_TOKENS_WORKER_RELATION, + DEPENDENCIES, + ETC_KUBERNETES, + ETCD_RELATION, + K8SD_PORT, + K8SD_SNAP_SOCKET, + KUBECONFIG, + KUBECTL_PATH, + SUPPORTED_DATASTORES, +) from ops.interface_kube_control import KubeControlProvides from snap import management as snap_management from snap import version as snap_version @@ -77,14 +92,6 @@ # Log messages can be retrieved using juju debug-log log = logging.getLogger(__name__) -VALID_LOG_LEVELS = ["info", "debug", "warning", "error", "critical"] -K8SD_SNAP_SOCKET = "/var/snap/k8s/common/var/lib/k8sd/state/control.socket" -KUBECONFIG = Path.home() / ".kube/config" -ETC_KUBERNETES = Path("/etc/kubernetes") -KUBECTL_PATH = Path("/snap/k8s/current/bin/kubectl") -K8SD_PORT = 6400 -SUPPORTED_DATASTORES = ["dqlite", "etcd"] - def _get_public_address() -> str: """Get public address from juju. @@ -208,7 +215,7 @@ def _apply_cos_requirements(self): Integration by applying the manifests for COS Cluster Roles and kube-state-metrics (K-S-M). """ - if not self.model.get_relation("cos-agent"): + if not self.model.get_relation(COS_RELATION): return log.info("Apply COS Integrations") @@ -242,7 +249,7 @@ def get_worker_versions(self) -> Dict[str, List[ops.Unit]]: Returns: Dict[str, List[ops.Unit]]: A dictionary of versions and the units that have them. 
""" - if not (relation := self.model.get_relation("k8s-cluster")): + if not (relation := self.model.get_relation(CLUSTER_WORKER_RELATION)): return {} versions = defaultdict(list) @@ -293,7 +300,7 @@ def get_cluster_name(self) -> str: if self._stored.cluster_name == "": if self.lead_control_plane and self.api_manager.is_cluster_bootstrapped(): self._stored.cluster_name = self._generate_unique_cluster_name() - elif not (relation := self.model.get_relation("cluster")): + elif not (relation := self.model.get_relation(CLUSTER_RELATION)): pass elif any( [ @@ -409,7 +416,7 @@ def _bootstrap_k8s_snap(self): def _config_containerd_registries(self): """Apply containerd custom registries.""" registries, config = [], "" - containerd_relation = self.model.get_relation("containerd") + containerd_relation = self.model.get_relation(CONTAINERD_RELATION) if self.is_control_plane: config = str(self.config["containerd-custom-registries"]) registries = containerd.parse_registries(config) @@ -422,12 +429,12 @@ def _config_containerd_registries(self): def _configure_cos_integration(self): """Retrieve the join token from secret databag and join the cluster.""" - if not self.model.get_relation("cos-agent"): + if not self.model.get_relation(COS_RELATION): return status.add(ops.MaintenanceStatus("Updating COS integrations")) log.info("Updating COS integration") - if relation := self.model.get_relation("cos-tokens"): + if relation := self.model.get_relation(COS_TOKENS_RELATION): self.collector.request(relation) def _get_valid_annotations(self) -> Optional[dict]: @@ -547,7 +554,7 @@ def _configure_datastore(self, config: Union[BootstrapConfig, UpdateClusterConfi if datastore == "etcd": log.info("Using etcd as external datastore") - etcd_relation = self.model.get_relation("etcd") + etcd_relation = self.model.get_relation(ETCD_RELATION) if not etcd_relation: raise ReconcilerError("Missing etcd relation") @@ -592,7 +599,7 @@ def _revoke_cluster_tokens(self, event: ops.EventBase): elif unit := _cluster_departing_unit(event): to_remove = unit - if peer := self.model.get_relation("cluster"): + if peer := self.model.get_relation(CLUSTER_RELATION): self.distributor.revoke_tokens( relation=peer, token_strategy=TokenStrategy.CLUSTER, @@ -600,7 +607,7 @@ def _revoke_cluster_tokens(self, event: ops.EventBase): to_remove=to_remove, ) - if workers := self.model.get_relation("k8s-cluster"): + if workers := self.model.get_relation(CLUSTER_WORKER_RELATION): self.distributor.revoke_tokens( relation=workers, token_strategy=TokenStrategy.CLUSTER, @@ -611,14 +618,14 @@ def _revoke_cluster_tokens(self, event: ops.EventBase): def _create_cluster_tokens(self): """Create tokens for the units in the cluster and k8s-cluster relations.""" log.info("Prepare clustering") - if peer := self.model.get_relation("cluster"): + if peer := self.model.get_relation(CLUSTER_RELATION): self.distributor.allocate_tokens( relation=peer, token_strategy=TokenStrategy.CLUSTER, token_type=ClusterTokenType.CONTROL_PLANE, ) - if workers := self.model.get_relation("k8s-cluster"): + if workers := self.model.get_relation(CLUSTER_WORKER_RELATION): self.distributor.allocate_tokens( relation=workers, token_strategy=TokenStrategy.CLUSTER, @@ -631,18 +638,18 @@ def _create_cos_tokens(self): This method creates COS tokens and distributes them to peers and workers if relations exist. 
""" - if not self.model.get_relation("cos-agent"): + if not self.model.get_relation(COS_RELATION): return log.info("Prepare cos tokens") - if rel := self.model.get_relation("cos-tokens"): + if rel := self.model.get_relation(COS_TOKENS_RELATION): self.distributor.allocate_tokens( relation=rel, token_strategy=TokenStrategy.COS, token_type=ClusterTokenType.CONTROL_PLANE, ) - if rel := self.model.get_relation("cos-worker-tokens"): + if rel := self.model.get_relation(COS_TOKENS_WORKER_RELATION): self.distributor.allocate_tokens( relation=rel, token_strategy=TokenStrategy.COS, @@ -679,7 +686,7 @@ def _get_scrape_jobs(self): Returns an empty list if the token cannot be retrieved or if the "cos-tokens" relation does not exist. """ - relation = self.model.get_relation("cos-tokens") + relation = self.model.get_relation(COS_TOKENS_RELATION) if not relation: log.warning("No cos-tokens available") return [] @@ -700,7 +707,7 @@ def _update_kubernetes_version(self): Raises: ReconcilerError: If the cluster integration is missing. """ - relation = self.model.get_relation("cluster") + relation = self.model.get_relation(CLUSTER_RELATION) if not relation: status.add(ops.BlockedStatus("Missing cluster integration")) raise ReconcilerError("Missing cluster integration") @@ -722,8 +729,8 @@ def _announce_kubernetes_version(self): if not local_version: raise ReconcilerError("k8s-snap is not installed") - peer = self.model.get_relation("cluster") - worker = self.model.get_relation("k8s-cluster") + peer = self.model.get_relation(CLUSTER_RELATION) + worker = self.model.get_relation(CLUSTER_WORKER_RELATION) for relation in (peer, worker): if not relation: @@ -772,7 +779,7 @@ def _join_cluster(self, event: ops.EventBase): Args: event (ops.EventBase): event triggering the join """ - if not (relation := self.model.get_relation("cluster")): + if not (relation := self.model.get_relation(CLUSTER_RELATION)): status.add(ops.BlockedStatus("Missing cluster integration")) raise ReconcilerError("Missing cluster integration") @@ -826,7 +833,7 @@ def _death_handler(self, event: ops.EventBase): self.update_status.run() self._last_gasp() - relation = self.model.get_relation("cluster") + relation = self.model.get_relation(CLUSTER_RELATION) local_cluster = self.get_cluster_name() remote_cluster = self.collector.cluster_name(relation, False) if relation else "" if local_cluster and local_cluster != remote_cluster: @@ -894,7 +901,7 @@ def _evaluate_removal(self, event: ops.EventBase) -> bool: elif ( self.is_worker and self.get_cluster_name() - and (relation := self.model.get_relation("cluster")) + and (relation := self.model.get_relation(CLUSTER_RELATION)) and not relation.units ): # If a worker unit has been clustered, diff --git a/charms/worker/k8s/src/literals.py b/charms/worker/k8s/src/literals.py index 2b102d4d..50b2180f 100644 --- a/charms/worker/k8s/src/literals.py +++ b/charms/worker/k8s/src/literals.py @@ -3,8 +3,32 @@ """Literals for the charm.""" +from pathlib import Path + +# Snap SNAP_NAME = "k8s" +# Logging +VALID_LOG_LEVELS = ["info", "debug", "warning", "error", "critical"] + +# Charm +ETC_KUBERNETES = Path("/etc/kubernetes") +KUBECONFIG = Path.home() / ".kube/config" +KUBECTL_PATH = Path("/snap/k8s/current/bin/kubectl") +K8SD_SNAP_SOCKET = "/var/snap/k8s/common/var/lib/k8sd/state/control.socket" +K8SD_PORT = 6400 +SUPPORTED_DATASTORES = ["dqlite", "etcd"] + +# Relations +CLUSTER_RELATION = "cluster" +CLUSTER_WORKER_RELATION = "k8s-cluster" +CONTAINERD_RELATION = "containerd" +COS_TOKENS_RELATION = "cos-tokens" 
+COS_TOKENS_WORKER_RELATION = "cos-worker-tokens" +COS_RELATION = "cos-agent" +ETCD_RELATION = "etcd" + +# Kubernetes services K8S_COMMON_SERVICES = [ "kubelet", "kube-proxy", @@ -18,12 +42,15 @@ K8S_DQLITE_SERVICE, "kube-controller-manager", "kube-scheduler", + *K8S_COMMON_SERVICES, ] K8S_WORKER_SERVICES = [ "k8s-apiserver-proxy", + *K8S_COMMON_SERVICES, ] +# Upgrade DEPENDENCIES = { # NOTE: Update the dependencies for the k8s-charm before releasing. "k8s_charm": { diff --git a/charms/worker/k8s/src/upgrade.py b/charms/worker/k8s/src/upgrade.py index 9038e262..f85e5122 100644 --- a/charms/worker/k8s/src/upgrade.py +++ b/charms/worker/k8s/src/upgrade.py @@ -19,7 +19,7 @@ from charms.operator_libs_linux.v2.snap import SnapError from inspector import ClusterInspector from literals import ( - K8S_COMMON_SERVICES, + CLUSTER_RELATION, K8S_CONTROL_PLANE_SERVICES, K8S_DQLITE_SERVICE, K8S_WORKER_SERVICES, @@ -199,12 +199,10 @@ def _upgrade(self, event: Union[ops.EventBase, ops.HookEvent]) -> None: self.charm.grant_upgrade() services = ( - K8S_CONTROL_PLANE_SERVICES + K8S_COMMON_SERVICES - if self.charm.is_control_plane - else K8S_COMMON_SERVICES + K8S_WORKER_SERVICES + K8S_CONTROL_PLANE_SERVICES if self.charm.is_control_plane else K8S_WORKER_SERVICES ) - if K8S_DQLITE_SERVICE in services and self.charm.datastore == "dqlite": + if K8S_DQLITE_SERVICE in services and self.charm.datastore != "dqlite": services.remove(K8S_DQLITE_SERVICE) try: @@ -227,7 +225,7 @@ def build_upgrade_stack(self) -> List[int]: Returns: A list of unit numbers to upgrade in order. """ - relation = self.charm.model.get_relation("cluster") + relation = self.charm.model.get_relation(CLUSTER_RELATION) if not relation: return [int(self.charm.unit.name.split("/")[-1])] diff --git a/charms/worker/k8s/tests/unit/test_token_distributor.py b/charms/worker/k8s/tests/unit/test_token_distributor.py index d9b9bfa8..2fbce2f9 100644 --- a/charms/worker/k8s/tests/unit/test_token_distributor.py +++ b/charms/worker/k8s/tests/unit/test_token_distributor.py @@ -13,6 +13,7 @@ import pytest import token_distributor from charm import K8sCharm +from literals import CLUSTER_RELATION @pytest.fixture(params=["worker", "control-plane"]) @@ -37,7 +38,7 @@ def test_request(harness): harness.disable_hooks() collector = token_distributor.TokenCollector(harness.charm, "my-node") relation_id = harness.add_relation("cluster", "remote") - collector.request(harness.charm.model.get_relation("cluster")) + collector.request(harness.charm.model.get_relation(CLUSTER_RELATION)) data = harness.get_relation_data(relation_id, harness.charm.unit.name) assert data["node-name"] == "my-node" @@ -47,8 +48,8 @@ def test_cluster_name_not_joined(harness): harness.disable_hooks() collector = token_distributor.TokenCollector(harness.charm, "my-node") relation_id = harness.add_relation("cluster", "remote") - remote = collector.cluster_name(harness.charm.model.get_relation("cluster"), False) - local = collector.cluster_name(harness.charm.model.get_relation("cluster"), True) + remote = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), False) + local = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), True) assert remote == local == "" data = harness.get_relation_data(relation_id, harness.charm.unit.name) assert not data.get("joined") @@ -60,13 +61,13 @@ def test_cluster_name_joined(harness): collector = token_distributor.TokenCollector(harness.charm, "my-node") relation_id = harness.add_relation("cluster", "k8s", 
unit_data={"cluster-name": "my-cluster"}) # Fetching the remote doesn't update joined field - remote = collector.cluster_name(harness.charm.model.get_relation("cluster"), False) + remote = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), False) assert remote == "my-cluster" data = harness.get_relation_data(relation_id, harness.charm.unit.name) assert not data.get("joined") # Fetching the local does update joined field - local = collector.cluster_name(harness.charm.model.get_relation("cluster"), True) + local = collector.cluster_name(harness.charm.model.get_relation(CLUSTER_RELATION), True) assert remote == local == "my-cluster" data = harness.get_relation_data(relation_id, harness.charm.unit.name) assert data["joined"] == "my-cluster" diff --git a/charms/worker/k8s/tests/unit/test_upgrade.py b/charms/worker/k8s/tests/unit/test_upgrade.py index e55f214c..8887cfdf 100644 --- a/charms/worker/k8s/tests/unit/test_upgrade.py +++ b/charms/worker/k8s/tests/unit/test_upgrade.py @@ -11,6 +11,7 @@ from inspector import ClusterInspector from lightkube.models.core_v1 import Node from lightkube.models.meta_v1 import ObjectMeta +from literals import CLUSTER_RELATION from upgrade import K8sDependenciesModel, K8sUpgrade @@ -103,7 +104,7 @@ def test_build_upgrade_stack_no_relation(self): result = self.upgrade.build_upgrade_stack() self.assertEqual(result, [0]) - self.charm.model.get_relation.assert_called_once_with("cluster") + self.charm.model.get_relation.assert_called_once_with(CLUSTER_RELATION) def test_build_upgrade_stack_with_relation(self): """Test build_upgrade_stack with cluster relation.""" @@ -119,7 +120,7 @@ def test_build_upgrade_stack_with_relation(self): result = self.upgrade.build_upgrade_stack() self.assertEqual(sorted(result), [0, 1, 2]) - self.charm.model.get_relation.assert_called_once_with("cluster") + self.charm.model.get_relation.assert_called_once_with(CLUSTER_RELATION) def test_verify_worker_versions_compatible(self): """Test _verify_worker_versions returns True when worker versions is compatible.""" From 93775783f2f33af7f1d3c8e6159b22c9a8a6c628 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 21:59:57 -0600 Subject: [PATCH 7/7] Update dependency ops to v2.17.1 (#196) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- charms/worker/k8s/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charms/worker/k8s/requirements.txt b/charms/worker/k8s/requirements.txt index 140861f3..dac5862d 100644 --- a/charms/worker/k8s/requirements.txt +++ b/charms/worker/k8s/requirements.txt @@ -7,7 +7,7 @@ ops.interface_aws @ git+https://github.com/charmed-kubernetes/interface-aws-inte ops.interface_gcp @ git+https://github.com/charmed-kubernetes/interface-gcp-integration@main#subdirectory=ops ops.interface_azure @ git+https://github.com/charmed-kubernetes/interface-azure-integration@main#subdirectory=ops cosl==0.0.43 -ops==2.17.0 +ops==2.17.1 pydantic==1.10.19 PyYAML==6.0.2 tomli ==2.1.0