diff --git a/.licenserc.yaml b/.licenserc.yaml index 9bfd6e8e..05121671 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -19,6 +19,7 @@ header: - 'charms/worker/k8s/lib/charms/k8s/**' paths-ignore: - 'charms/worker/k8s/lib/charms/**' + - 'tests/integration/data/*.tar.gz' - '.github/**' - '**/.gitkeep' - '**/*.cfg' diff --git a/charms/worker/build-snap-installation.sh b/charms/worker/build-snap-installation.sh new file mode 120000 index 00000000..f31920b1 --- /dev/null +++ b/charms/worker/build-snap-installation.sh @@ -0,0 +1 @@ +k8s/build-snap-installation.sh \ No newline at end of file diff --git a/charms/worker/charmcraft.yaml b/charms/worker/charmcraft.yaml index 326a4b9b..6f158ad0 100644 --- a/charms/worker/charmcraft.yaml +++ b/charms/worker/charmcraft.yaml @@ -57,6 +57,7 @@ bases: - name: ubuntu channel: "24.04" architectures: [arm64] + config: options: labels: @@ -68,6 +69,22 @@ config: Note: Due to NodeRestriction, workers are limited to how they can label themselves https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction + +resources: + snap-installation: + type: file + filename: snap-installation.tar.gz + description: | + Override charm defined snap installation script + + This charm is designed to operate with a specific revision of snaps, overriding + with anything will indicate that the charm is running an unsupported configuration. + + Content Options: + 0-byte resource (Default) -- Use the charm defined snap installation script + ./snap-installation.yaml -- Overrides the charm defined snap-installation.yaml + ./k8s_XXXX.snap -- Overrides the charm with a specific snap file installed dangerously + parts: charm: plugin: charm @@ -97,6 +114,7 @@ peers: provides: cos-agent: interface: cos_agent + requires: aws: interface: aws-integration diff --git a/charms/worker/k8s/build-snap-installation.sh b/charms/worker/k8s/build-snap-installation.sh new file mode 100755 index 00000000..e54a426f --- /dev/null +++ b/charms/worker/k8s/build-snap-installation.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Create an empty tarball to be used as a placeholder for the snap installation override +echo "Creating empty tarball at $1" +touch "${1}" \ No newline at end of file diff --git a/charms/worker/k8s/charmcraft.yaml b/charms/worker/k8s/charmcraft.yaml index 61bd0519..8f062a00 100644 --- a/charms/worker/k8s/charmcraft.yaml +++ b/charms/worker/k8s/charmcraft.yaml @@ -195,6 +195,21 @@ config: description: | Enables or disables the network feature. +resources: + snap-installation: + type: file + filename: snap-installation.tar.gz + description: | + Override charm defined snap installation script + + This charm is designed to operate with a specific revision of snaps, overriding + with anything will indicate that the charm is running an unsupported configuration. 
+ + Content Options: + 0-byte resource (Default) -- Use the charm defined snap installation script + ./snap-installation.yaml -- Overrides the charm defined snap-installation.yaml + ./k8s_XXXX.snap -- Overrides the charm with a specific snap file installed dangerously + actions: get-kubeconfig: description: Retrieve Public Kubernetes cluster config, including credentials diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index 4b686bd7..68dd36e2 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -31,7 +31,6 @@ import charms.operator_libs_linux.v2.snap as snap_lib import containerd import ops -import reschedule import yaml from charms.contextual_status import ReconcilerError, WaitingStatus, on_error from charms.grafana_agent.v0.cos_agent import COSAgentProvider @@ -58,6 +57,7 @@ from charms.reconciler import Reconciler from cloud_integration import CloudIntegration from cos_integration import COSIntegration +from events import update_status from inspector import ClusterInspector from kube_control import configure as configure_kube_control from literals import DEPENDENCIES @@ -143,7 +143,10 @@ def __init__(self, *args): dependency_model=K8sDependenciesModel(**DEPENDENCIES), ) self.cos = COSIntegration(self) - self.reconciler = Reconciler(self, self._reconcile) + self.update_status = update_status.Handler(self) + self.reconciler = Reconciler( + self, self._reconcile, exit_status=self.update_status.active_status + ) self.distributor = TokenDistributor(self, self.get_node_name(), self.api_manager) self.collector = TokenCollector(self, self.get_node_name()) self.labeller = LabelMaker( @@ -164,7 +167,6 @@ def __init__(self, *args): ], ) - self.framework.observe(self.on.update_status, self._on_update_status) if self.is_control_plane: self.etcd = EtcdReactiveRequires(self) self.kube_control = KubeControlProvides(self, endpoint="kube-control") @@ -286,7 +288,7 @@ def get_cloud_name(self) -> str: def _install_snaps(self): """Install snap packages.""" status.add(ops.MaintenanceStatus("Ensuring snap installation")) - snap_management() + snap_management(self) @on_error(WaitingStatus("Waiting to apply snap requirements"), subprocess.CalledProcessError) def _apply_snap_requirements(self): @@ -628,7 +630,8 @@ def _update_kubernetes_version(self): if not relation: status.add(ops.BlockedStatus("Missing cluster integration")) raise ReconcilerError("Missing cluster integration") - if version := snap_version("k8s"): + version, _ = snap_version("k8s") + if version: relation.data[self.unit]["version"] = version @on_error(ops.WaitingStatus("Announcing Kubernetes version")) @@ -641,7 +644,8 @@ def _announce_kubernetes_version(self): ReconcilerError: If the k8s snap is not installed, the version is missing, or the version does not match the local version. 
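The snap-installation resource described above accepts three payloads. As a minimal sketch (not part of the charm), this is one way to build a channel-override archive by hand; the amd64 architecture and latest/edge channel are illustrative, and note that the extractor in snap.py matches a member ending in snap_installation.yaml (underscore):

    import tarfile
    from pathlib import Path

    manifest = Path("snap_installation.yaml")
    manifest.write_text(
        "amd64:\n"
        "- install-type: store\n"
        "  name: k8s\n"
        "  channel: latest/edge\n",
        encoding="utf-8",
    )
    with tarfile.open("snap-installation.tar.gz", "w:gz") as tar:
        tar.add(manifest, arcname="./snap_installation.yaml")
    # attach to a deployed application, e.g.:
    #   juju attach-resource k8s snap-installation=./snap-installation.tar.gz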
""" - if not (local_version := snap_version("k8s")): + local_version, _ = snap_version("k8s") + if not local_version: raise ReconcilerError("k8s-snap is not installed") peer = self.model.get_relation("cluster") @@ -739,7 +743,7 @@ def _death_handler(self, event: ops.EventBase): """ if self.lead_control_plane: self._revoke_cluster_tokens(event) - self._update_status() + self.update_status.run() self._last_gasp() relation = self.model.get_relation("cluster") @@ -779,28 +783,12 @@ def _reconcile(self, event: ops.EventBase): self._join_cluster(event) self._config_containerd_registries() self._configure_cos_integration() - self._update_status() + self.update_status.run() self._apply_node_labels() if self.is_control_plane: self._copy_internal_kubeconfig() self._expose_ports() - def _update_status(self): - """Check k8s snap status.""" - if version := snap_version("k8s"): - self.unit.set_workload_version(version) - - if not self.get_cluster_name(): - status.add(ops.WaitingStatus("Node not Clustered")) - return - - trigger = reschedule.PeriodicEvent(self) - if not self._is_node_ready(): - status.add(ops.WaitingStatus("Node not Ready")) - trigger.create(reschedule.Period(seconds=30)) - return - trigger.cancel() - def _evaluate_removal(self, event: ops.EventBase) -> bool: """Determine if my unit is being removed. @@ -896,17 +884,6 @@ def _apply_node_labels(self): else: log.info("Node %s not yet labelled", node) - def _on_update_status(self, _event: ops.UpdateStatusEvent): - """Handle update-status event.""" - if not self.reconciler.stored.reconciled: - return - - try: - with status.context(self.unit): - self._update_status() - except status.ReconcilerError: - log.exception("Can't update_status") - def kubectl(self, *args) -> str: """Run kubectl command. diff --git a/charms/worker/k8s/src/events/update_status.py b/charms/worker/k8s/src/events/update_status.py new file mode 100644 index 00000000..fead5f92 --- /dev/null +++ b/charms/worker/k8s/src/events/update_status.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Learn more at: https://juju.is/docs/sdk + +"""Update status handler for the k8s charm. + +This handler is responsible for updating the unit's workload version and status +""" + +import logging + +import charms.contextual_status as status +import ops +import reschedule +from protocols import K8sCharmProtocol +from snap import version as snap_version + +# Log messages can be retrieved using juju debug-log +log = logging.getLogger(__name__) + + +class DynamicActiveStatus(ops.ActiveStatus): + """An ActiveStatus class that can be updated. + + Attributes: + message (str): explanation of the unit status + prefix (str): Optional prefix to the unit status + postfix (str): Optional postfix to the unit status + """ + + def __init__(self): + """Initialise the DynamicActiveStatus.""" + super().__init__("Ready") + self.prefix: str = "" + self.postfix: str = "" + + @property + def message(self) -> str: + """Return the message for the status.""" + pre = f"{self.prefix} :" if self.prefix else "" + post = f" ({self.postfix})" if self.postfix else "" + return f"{pre}{self._message}{post}" + + @message.setter + def message(self, message: str): + """Set the message for the status. + + Args: + message (str): explanation of the unit status + """ + self._message = message + + +class Handler(ops.Object): + """Handler for the update-status event in a Kubernetes operator. 
+ + This class observes the `update_status` event and handles it by checking the + Kubernetes snap status and updating the unit's workload version accordingly. + + Attributes: + charm (CharmBase): The charm instance that this handler is associated with. + active_status (DynamicActiveStatus): The active status object used to manage + the unit's status during the update process. + """ + + def __init__(self, charm: K8sCharmProtocol): + """Initialize the UpdateStatusEvent. + + Args: + charm: The charm instance that is instantiating this event. + """ + super().__init__(charm, "update_status") + self.charm = charm + self.active_status = DynamicActiveStatus() + self.charm.framework.observe(self.charm.on.update_status, self._on_update_status) + + def _on_update_status(self, _event: ops.UpdateStatusEvent): + """Handle update-status event.""" + if not self.charm.reconciler.stored.reconciled: + return + + try: + with status.context(self.charm.unit, exit_status=self.active_status): + self.run() + except status.ReconcilerError: + log.exception("Can't update_status") + + def run(self): + """Check k8s snap status.""" + version, overridden = snap_version("k8s") + if version: + self.charm.unit.set_workload_version(version) + + self.active_status.postfix = "Snap Override Active" if overridden else "" + + if not self.charm.get_cluster_name(): + status.add(ops.WaitingStatus("Node not Clustered")) + return + + trigger = reschedule.PeriodicEvent(self.charm) + if not self.charm._is_node_ready(): + status.add(ops.WaitingStatus("Node not Ready")) + trigger.create(reschedule.Period(seconds=30)) + return + trigger.cancel() diff --git a/charms/worker/k8s/src/protocols.py b/charms/worker/k8s/src/protocols.py index f06f05c1..0ca5fc64 100644 --- a/charms/worker/k8s/src/protocols.py +++ b/charms/worker/k8s/src/protocols.py @@ -6,6 +6,7 @@ import ops from charms.interface_external_cloud_provider import ExternalCloudProvider from charms.k8s.v0.k8sd_api_manager import K8sdAPIManager +from charms.reconciler import Reconciler from ops.interface_kube_control import KubeControlProvides @@ -16,11 +17,13 @@ class K8sCharmProtocol(ops.CharmBase): api_manager (K8sdAPIManager): The API manager for the charm. kube_control (KubeControlProvides): The kube-control interface. xcp (ExternalCloudProvider): The external cloud provider interface. + reconciler (Reconciler): The reconciler for the charm """ api_manager: K8sdAPIManager kube_control: KubeControlProvides xcp: ExternalCloudProvider + reconciler: Reconciler def get_cluster_name(self) -> str: """Get the cluster name. @@ -37,3 +40,11 @@ def get_cloud_name(self) -> str: NotImplementedError: If the method is not implemented. """ raise NotImplementedError + + def _is_node_ready(self) -> bool: + """Check if the node is ready. + + Raises: + NotImplementedError: If the method is not implemented. 
+ """ + raise NotImplementedError diff --git a/charms/worker/k8s/src/snap.py b/charms/worker/k8s/src/snap.py index 575a9915..e78291b4 100644 --- a/charms/worker/k8s/src/snap.py +++ b/charms/worker/k8s/src/snap.py @@ -10,11 +10,14 @@ import logging import re +import shutil import subprocess +import tarfile from pathlib import Path -from typing import List, Literal, Optional, Union +from typing import List, Literal, Optional, Tuple, Union import charms.operator_libs_linux.v2.snap as snap_lib +import ops import yaml from pydantic import BaseModel, Field, ValidationError, parse_obj_as, validator from typing_extensions import Annotated @@ -23,6 +26,27 @@ log = logging.getLogger(__name__) +def _yaml_read(path: Path) -> dict: + """Read a yaml file into a dictionary. + + Args: + path: The path to the yaml file + """ + with path.open(mode="r", encoding="utf-8") as f: + return yaml.safe_load(f) + + +def _yaml_write(path: Path, content: dict) -> None: + """Write a dictionary to a yaml file. + + Args: + path: The path to the yaml file + content: The dictionary to write + """ + with path.open(mode="w", encoding="utf-8") as f: + yaml.safe_dump(content, f) + + class SnapFileArgument(BaseModel): """Structure to install a snap by file. @@ -31,7 +55,7 @@ class SnapFileArgument(BaseModel): name (str): The name of the snap after installed filename (Path): Path to the snap to locally install classic (bool): If it should be installed as a classic snap - dangerous (bool): If it should be installed as a dangerouse snap + dangerous (bool): If it should be installed as a dangerous snap devmode (bool): If it should be installed as with dev mode enabled """ @@ -91,25 +115,138 @@ def _validate_revision(cls, value: Union[str, int, None]) -> Optional[str]: ] -def _parse_management_arguments() -> List[SnapArgument]: +def _local_arch() -> str: + """Retrieve the local architecture. + + Returns: + str: The architecture of this machine + """ + dpkg_arch = ["dpkg", "--print-architecture"] + return subprocess.check_output(dpkg_arch).decode("UTF-8").strip() + + +def _default_snap_installation() -> Path: + """Return the default snap_installation manifest. + + Returns: + path to the default snap_installation manifest + """ + return Path("templates/snap_installation.yaml") + + +def _overridden_snap_installation() -> Path: + """Return the overridden snap_installation manifest. + + Returns: + path to the overridden snap_installation manifest + """ + return Path("./snap-installation/resource/snap_installation.yaml") + + +def _normalize_paths(snap_installation): + """Normalize the paths in the snap_installation manifest. + + Arguments: + snap_installation: The path to the snap_installation manifest + """ + snap_installation = snap_installation.resolve() + content, updated = _yaml_read(snap_installation), False + for arch, snaps in content.items(): + for idx, snap in enumerate(snaps): + if snap.get("filename"): + resolved = (snap_installation.parent / snap["filename"]).resolve() + log.info("Resolving snap filename: %s to %s", snap["filename"], resolved) + content[arch][idx]["filename"] = str(resolved) + updated = True + if updated: + _yaml_write(snap_installation, content) + + +def _select_snap_installation(charm: ops.CharmBase) -> Path: + """Select the snap_installation manifest. 
+ + Arguments: + charm: The charm instance necessary to check the unit resources + + Returns: + path: The path to the snap_installation manifest + + Raises: + SnapError: when the management issue cannot be resolved + """ + try: + resource_path = charm.model.resources.fetch("snap-installation") + except (ops.ModelError, NameError): + log.error("Something went wrong when claiming 'snap-installation' resource.") + return _default_snap_installation() + + resource_size = resource_path.stat().st_size + log.info("Resource path size: %d bytes", resource_size) + unpack_path = _overridden_snap_installation().parent + shutil.rmtree(unpack_path, ignore_errors=True) + if resource_size == 0: + log.info("Resource size is zero bytes. Use the charm defined snap installation script") + return _default_snap_installation() + + # Unpack the snap-installation resource + unpack_path.mkdir(parents=True, exist_ok=True) + try: + with tarfile.open(resource_path, "r:gz") as tar: + for member in tar.getmembers(): + if member.name.endswith("snap_installation.yaml"): + log.info("Found snap_installation manifest") + tar.extract(member, path=unpack_path) + snap_installation = unpack_path / member.name + _normalize_paths(snap_installation) + return snap_installation + if member.name.endswith(".snap"): + log.info("Found snap_installation snap: %s", member.name) + tar.extract(member, path=unpack_path) + arch = _local_arch() + manifest = { + arch: [ + { + "install-type": "file", + "name": "k8s", + "filename": str(unpack_path / member.name), + "classic": True, + "dangerous": True, + } + ] + } + snap_installation = unpack_path / "snap_installation.yaml" + _yaml_write(snap_installation, manifest) + return snap_installation + except tarfile.TarError as e: + log.error("Failed to extract 'snap-installation:'") + raise snap_lib.SnapError("Invalid snap-installation resource") from e + + log.error("Failed to find a snap file in snap_installation resource") + raise snap_lib.SnapError("Failed to find snap_installation manifest") + + +def _parse_management_arguments(charm: ops.CharmBase) -> List[SnapArgument]: """Parse snap management arguments. + Arguments: + charm: The charm instance necessary to check the unit resources + Raises: SnapError: when the management issue cannot be resolved Returns: Parsed arguments list for the specific host architecture """ - revision = Path("templates/snap_installation.yaml") + revision = _select_snap_installation(charm) if not revision.exists(): raise snap_lib.SnapError(f"Failed to find file={revision}") try: - body = yaml.safe_load(revision.read_text(encoding="utf-8")) + body = _yaml_read(revision) except yaml.YAMLError as e: log.error("Failed to load file=%s, %s", revision, e) raise snap_lib.SnapError(f"Failed to load file={revision}") - dpkg_arch = ["dpkg", "--print-architecture"] - arch = subprocess.check_output(dpkg_arch).decode("UTF-8").strip() + + arch = _local_arch() if not (isinstance(body, dict) and (arch_spec := body.get(arch))): log.warning("Failed to find revision for arch=%s", arch) @@ -126,24 +263,63 @@ def _parse_management_arguments() -> List[SnapArgument]: return args -def management(): - """Manage snap installations on this machine.""" +def management(charm: ops.CharmBase) -> None: + """Manage snap installations on this machine. 
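When the attached archive contains only a *.snap file, _select_snap_installation synthesizes the manifest itself; on an amd64 machine the snap_installation.yaml it writes is equivalent to the following (the snap filename is illustrative):

    import yaml

    synthesized = {
        "amd64": [{
            "install-type": "file",
            "name": "k8s",
            "filename": "snap-installation/resource/k8s_9999.snap",
            "classic": True,
            "dangerous": True,
        }]
    }
    print(yaml.safe_dump(synthesized))
    # _parse_management_arguments() validates this into a single SnapFileArgument,
    # which management() hands to snap_lib.install_local (unless block_refresh
    # vetoes the change).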
+ + Arguments: + charm: The charm instance + """ cache = snap_lib.SnapCache() - for args in _parse_management_arguments(): + for args in _parse_management_arguments(charm): which = cache[args.name] + if block_refresh(which, args): + continue + install_args = args.dict(exclude_none=True) if isinstance(args, SnapFileArgument) and which.revision != "x1": - snap_lib.install_local(**args.dict(exclude_none=True)) + snap_lib.install_local(**install_args) elif isinstance(args, SnapStoreArgument) and args.revision: if which.revision != args.revision: log.info("Ensuring %s snap revision=%s", args.name, args.revision) - which.ensure(**args.dict(exclude_none=True)) + which.ensure(**install_args) which.hold() elif isinstance(args, SnapStoreArgument): log.info("Ensuring %s snap channel=%s", args.name, args.channel) - which.ensure(**args.dict(exclude_none=True)) + which.ensure(**install_args) + + +def block_refresh(which: snap_lib.Snap, args: SnapArgument) -> bool: + """Block snap refreshes if the snap is in a specific state. + Arguments: + which: The snap to check + args: The snap arguments -def version(snap: str) -> Optional[str]: + Returns: + bool: True if the snap should be blocked from refreshing + """ + if snap_lib.SnapState(which.state) == snap_lib.SnapState.Available: + log.info("Allowing %s snap installation", args.name) + return False + if _overridden_snap_installation().exists(): + log.info("Allowing %s snap refresh due to snap installation override", args.name) + return False + if isinstance(args, SnapStoreArgument) and args.revision: + if block := which.revision != args.revision: + log.info("Blocking %s snap refresh to revision=%s", args.name, args.revision) + else: + log.info("Allowing %s snap refresh to same revision", args.name) + return block + if isinstance(args, SnapStoreArgument): + if block := which.channel != args.channel: + log.info("Blocking %s snap refresh to channel=%s", args.name, args.channel) + else: + log.info("Allowing %s snap refresh to same channel (%s)", args.name, args.channel) + return block + log.info("Blocking %s snap refresh", args.name) + return True + + +def version(snap: str) -> Tuple[Optional[str], bool]: """Retrieve the version of the installed snap package. Arguments: @@ -153,15 +329,16 @@ def version(snap: str) -> Optional[str]: Optional[str]: The version of the installed snap package, or None if not available. 
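A sketch of the refresh gate above, assuming a unit whose k8s snap is already installed at revision 1234 while the manifest pins revision 5678 (both revisions are illustrative):

    cache = snap_lib.SnapCache()
    installed = cache["k8s"]                   # state Present, revision "1234"
    wanted = SnapStoreArgument(name="k8s", revision="5678")
    block_refresh(installed, wanted)           # True -> management() skips the refresh
    # With the unpacked resource override present (_overridden_snap_installation()
    # exists), the same call returns False and the pinned refresh proceeds.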
""" + overridden = _overridden_snap_installation().exists() try: result = subprocess.check_output(["/usr/bin/snap", "list", snap]) except subprocess.CalledProcessError: - return None + return None, overridden output = result.decode().strip() match = re.search(r"(\d+\.\d+(?:\.\d+)?)", output) if match: - return match.group() + return match.group(), overridden log.info("Snap k8s not found or no version available.") - return None + return None, overridden diff --git a/charms/worker/k8s/tests/unit/test_base.py b/charms/worker/k8s/tests/unit/test_base.py index acff6d85..dc1038ce 100644 --- a/charms/worker/k8s/tests/unit/test_base.py +++ b/charms/worker/k8s/tests/unit/test_base.py @@ -52,7 +52,6 @@ def mock_reconciler_handlers(harness): "_check_k8sd_ready", "_join_cluster", "_configure_cos_integration", - "_update_status", "_apply_node_labels", "_update_kubernetes_version", } @@ -70,9 +69,11 @@ def mock_reconciler_handlers(harness): "_announce_kubernetes_version", } - handlers = [mock.patch(f"charm.K8sCharm.{name}") for name in handler_names] - yield dict(zip(handler_names, (h.start() for h in handlers))) - for handler in handlers: + mocked = [mock.patch(f"charm.K8sCharm.{name}") for name in handler_names] + handlers = dict(zip(handler_names, (m.start() for m in mocked))) + handlers["_update_status"] = mock.patch.object(harness.charm.update_status, "run").start() + yield handlers + for handler in handlers.values(): handler.stop() diff --git a/charms/worker/k8s/tests/unit/test_snap.py b/charms/worker/k8s/tests/unit/test_snap.py index 19c1e4eb..65057caf 100644 --- a/charms/worker/k8s/tests/unit/test_snap.py +++ b/charms/worker/k8s/tests/unit/test_snap.py @@ -6,127 +6,338 @@ # pylint: disable=duplicate-code,missing-function-docstring """Unit tests snap module.""" +import gzip import io import subprocess +import tarfile from pathlib import Path +from textwrap import dedent from unittest import mock +import ops.testing import pytest import snap +from charm import K8sCharm -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=False)) -def test_parse_no_file(): +@pytest.fixture(params=["worker", "control-plane"]) +def harness(request): + """Craft a ops test harness. 
+ + Args: + request: pytest request object + """ + meta = Path(__file__).parent / "../../charmcraft.yaml" + if request.param == "worker": + meta = Path(__file__).parent / "../../../charmcraft.yaml" + harness = ops.testing.Harness(K8sCharm, meta=meta.read_text()) + harness.begin() + harness.charm.is_worker = request.param == "worker" + yield harness + harness.cleanup() + + +@pytest.fixture +def missing_snap_installation(): + """Test missing default snap-installation.""" + with mock.patch("snap._default_snap_installation") as mocked: + mock_path = mocked.return_value + mock_path.exists.return_value = False + yield mocked + mocked.assert_called_once_with() + + +@pytest.fixture +def snap_installation(): + """Test missing default snap-installation.""" + with mock.patch("snap._default_snap_installation") as mocked: + mock_path = mocked.return_value + mock_path.exists.return_value = True + mock_stream = mock_path.open.return_value.__enter__ + yield mock_stream + mocked.assert_called_once_with() + + +@pytest.fixture(autouse=True) +def resource_snap_installation(tmp_path): + """Add snap-installation resource.""" + with mock.patch("snap._overridden_snap_installation") as mocked: + mock_path = Path(tmp_path) / "snap_installation.yaml" + mocked.return_value = mock_path + yield mock_path + + +@pytest.fixture() +def block_refresh(): + """Block snap refresh.""" + with mock.patch("snap.block_refresh") as mocked: + mocked.return_value = False + yield mocked + + +@mock.patch("snap.snap_lib.SnapCache") +@pytest.mark.parametrize( + "state, as_file", + [ + [("present", "1234", None), False], + [("present", None, "edge"), False], + [("present", None, None), True], + ], + ids=[ + "installed & store-by-channel", + "installed & store-by-revision", + "installed & file-without-override", + ], +) +def test_block_refresh(cache, state, as_file, caplog, resource_snap_installation): + """Test block refresh.""" + caplog.set_level(0) + k8s_snap = cache()["k8s"] + k8s_snap.state, k8s_snap.revision, k8s_snap.channel = state + if as_file: + args = snap.SnapFileArgument( + name="k8s", + filename=resource_snap_installation.parent / "k8s_1234.snap", + ) + else: + args = snap.SnapStoreArgument( + name="k8s", + channel="beta" if k8s_snap.channel else None, + revision="5678" if k8s_snap.revision else None, + ) + assert snap.block_refresh(k8s_snap, args) + assert "Blocking k8s snap refresh" in caplog.text + + +@mock.patch("snap.snap_lib.SnapCache") +@pytest.mark.parametrize( + "state, overridden", + [ + [("available", None, None), None], + [("present", "1234", None), None], + [("present", None, "edge"), None], + [("present", None, None), True], + ], + ids=[ + "not installed yet", + "installed & store-by-same-channel", + "installed & store-by-same-revision", + "installed & override", + ], +) +def test_not_block_refresh(cache, state, overridden, caplog, resource_snap_installation): + """Test block refresh.""" + caplog.set_level(0) + k8s_snap = cache()["k8s"] + k8s_snap.state, k8s_snap.revision, k8s_snap.channel = state + if overridden: + resource_snap_installation.write_text( + "amd64:\n- install-type: store\n name: k8s\n channel: edge" + ) + args = snap.SnapStoreArgument( + name="k8s", + channel=k8s_snap.channel, + revision=k8s_snap.revision, + ) + assert not snap.block_refresh(k8s_snap, args) + assert "Allowing k8s snap" in caplog.text + + +@pytest.mark.usefixtures("missing_snap_installation") +def test_parse_no_file(harness): """Test no file exists.""" with pytest.raises(snap.snap_lib.SnapError): - 
snap._parse_management_arguments() + snap._parse_management_arguments(harness.charm) -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=True)) -@mock.patch("pathlib.Path.open") -def test_parse_invalid_file(mock_open): +def test_parse_invalid_file(snap_installation, harness): """Test file is invalid.""" - mock_open().__enter__.return_value = io.StringIO("example: =") + snap_installation.return_value = io.BytesIO(b"example: =") with pytest.raises(snap.snap_lib.SnapError): - snap._parse_management_arguments() + snap._parse_management_arguments(harness.charm) -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=True)) -@mock.patch("pathlib.Path.open") @mock.patch("subprocess.check_output") -def test_parse_invalid_arch(mock_checkoutput, mock_open): +def test_parse_invalid_arch(mock_checkoutput, snap_installation, harness): """Test file has invalid arch.""" - mock_open().__enter__.return_value = io.StringIO("{}") + snap_installation.return_value = io.BytesIO(b"{}") mock_checkoutput().decode.return_value = "amd64" with pytest.raises(snap.snap_lib.SnapError): - snap._parse_management_arguments() + snap._parse_management_arguments(harness.charm) -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=True)) -@mock.patch("pathlib.Path.open") @mock.patch("subprocess.check_output") -def test_parse_validation_error(mock_checkoutput, mock_open): +def test_parse_validation_error(mock_checkoutput, snap_installation, harness): """Test file cannot be parsed.""" - mock_open().__enter__.return_value = io.StringIO("amd64:\n- {}") + snap_installation.return_value = io.BytesIO(b"amd64:\n- {}") mock_checkoutput().decode.return_value = "amd64" with pytest.raises(snap.snap_lib.SnapError): - snap._parse_management_arguments() + snap._parse_management_arguments(harness.charm) + + +def _create_gzip_tar_string(file_data_dict): + """Create a gzip-compressed tar archive and return it as a base64-encoded string. + + Args: + file_data_dict: Dictionary where keys are filenames and values are file content as strings. + + Returns: + Gzipped tar archive content as a base64-encoded string. 
+ """ + # Create a BytesIO buffer for the tar file + tar_buffer = io.BytesIO() + + # Open a tarfile in the buffer + with tarfile.open(fileobj=tar_buffer, mode="w") as tar: + for filename, file_content in file_data_dict.items(): + # Create a BytesIO buffer for each file's content + file_buffer = io.BytesIO(file_content.encode("utf-8")) + + # Create a tarinfo object with details of the file + tarinfo = tarfile.TarInfo(name=filename) + tarinfo.size = len(file_content) + + # Add the file content to the tar archive + tar.addfile(tarinfo, file_buffer) + + # Get the tar content from the buffer + tar_content = tar_buffer.getvalue() + + # Compress the tar content with gzip + gzip_buffer = io.BytesIO() + with gzip.GzipFile(fileobj=gzip_buffer, mode="wb") as gz: + gz.write(tar_content) + + # Get the gzipped tar content + return gzip_buffer.getvalue() + + +def test_resource_supplied_installation_by_channel(harness): + """Test resource installs by store channel.""" + arch = snap._local_arch() + yaml_data = f"{arch}:\n- install-type: store\n name: k8s\n channel: edge" + file_data = {"./snap_installation.yaml": yaml_data} + harness.add_resource("snap-installation", _create_gzip_tar_string(file_data)) + args = snap._parse_management_arguments(harness.charm) + assert len(args) == 1 + assert isinstance(args[0], snap.SnapStoreArgument) + assert args[0].channel == "edge" + assert args[0].name == "k8s" + assert args[0].install_type == "store" + + +def test_resource_supplied_installation_by_filename(harness, resource_snap_installation): + """Test resource installs by included filename.""" + arch = snap._local_arch() + yaml_data = dedent( + f""" + {arch}: + - install-type: file + name: k8s + filename: ./k8s_xxxx.snap + dangerous: true + classic: true + """ + ).strip() + file_data = {"./snap_installation.yaml": yaml_data, "./k8s_xxxx.snap": ""} + harness.add_resource("snap-installation", _create_gzip_tar_string(file_data)) + args = snap._parse_management_arguments(harness.charm) + assert len(args) == 1 + assert isinstance(args[0], snap.SnapFileArgument) + assert args[0].install_type == "file" + assert args[0].name == "k8s" + assert args[0].filename == resource_snap_installation.parent / "k8s_xxxx.snap" + assert args[0].dangerous + assert args[0].classic + + +def test_resource_supplied_snap(harness, resource_snap_installation): + """Test resource installs by snap only.""" + file_data = {"./k8s_xxxx.snap": ""} + harness.add_resource("snap-installation", _create_gzip_tar_string(file_data)) + args = snap._parse_management_arguments(harness.charm) + assert len(args) == 1 + assert isinstance(args[0], snap.SnapFileArgument) + assert args[0].name == "k8s" + assert args[0].install_type == "file" + assert args[0].filename == resource_snap_installation.parent / "k8s_xxxx.snap" + assert args[0].dangerous + assert args[0].classic -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=True)) -@mock.patch("pathlib.Path.open") @mock.patch("subprocess.check_output") -def test_parse_valid_store(mock_checkoutput, mock_open): +def test_parse_valid_store(mock_checkoutput, snap_installation, harness): """Test file parses as store content.""" - content = """ + content = b""" amd64: - install-type: store name: k8s channel: edge """ - mock_open().__enter__.return_value = io.StringIO(content) + snap_installation.return_value = io.BytesIO(content) mock_checkoutput().decode.return_value = "amd64" - args = snap._parse_management_arguments() + args = snap._parse_management_arguments(harness.charm) assert args == [ 
snap.SnapStoreArgument(name="k8s", channel="edge"), ] -@mock.patch("pathlib.Path.exists", mock.Mock(return_value=True)) -@mock.patch("pathlib.Path.open") @mock.patch("subprocess.check_output") -def test_parse_valid_file(mock_checkoutput, mock_open): +def test_parse_valid_file(mock_checkoutput, snap_installation, harness): """Test file parses as file content.""" - content = """ + content = b""" amd64: - install-type: file name: k8s filename: path/to/thing """ - mock_open().__enter__.return_value = io.StringIO(content) + snap_installation.return_value = io.BytesIO(content) mock_checkoutput().decode.return_value = "amd64" - args = snap._parse_management_arguments() + args = snap._parse_management_arguments(harness.charm) assert args == [ snap.SnapFileArgument(name="k8s", filename=Path("path/to/thing")), ] +@pytest.mark.usefixtures("block_refresh") @mock.patch("snap._parse_management_arguments") @mock.patch("snap.snap_lib.install_local") @mock.patch("snap.snap_lib.SnapCache") -def test_management_installs_local(cache, install_local, args): +def test_management_installs_local(cache, install_local, args, harness): """Test installer uses local installer.""" k8s_snap = cache()["k8s"] args.return_value = [snap.SnapFileArgument(name="k8s", filename=Path("path/to/thing"))] - snap.management() + snap.management(harness.charm) k8s_snap.ensure.assert_not_called() install_local.assert_called_once_with(filename=Path("path/to/thing")) +@pytest.mark.usefixtures("block_refresh") @mock.patch("snap._parse_management_arguments") @mock.patch("snap.snap_lib.install_local") @mock.patch("snap.snap_lib.SnapCache") @pytest.mark.parametrize("revision", [None, "123"]) -def test_management_installs_store_from_channel(cache, install_local, args, revision): +def test_management_installs_store_from_channel(cache, install_local, args, revision, harness): """Test installer uses store installer.""" k8s_snap = cache()["k8s"] k8s_snap.revision = revision args.return_value = [snap.SnapStoreArgument(name="k8s", channel="edge")] - snap.management() + snap.management(harness.charm) install_local.assert_not_called() k8s_snap.ensure.assert_called_once_with(state=snap.snap_lib.SnapState.Present, channel="edge") +@pytest.mark.usefixtures("block_refresh") @mock.patch("snap._parse_management_arguments") @mock.patch("snap.snap_lib.install_local") @mock.patch("snap.snap_lib.SnapCache") @pytest.mark.parametrize("revision", [None, "456", "123"]) -def test_management_installs_store_from_revision(cache, install_local, args, revision): +def test_management_installs_store_from_revision(cache, install_local, args, revision, harness): """Test installer uses store installer.""" k8s_snap = cache()["k8s"] k8s_snap.revision = revision args.return_value = [snap.SnapStoreArgument(name="k8s", revision=123)] - snap.management() + snap.management(harness.charm) install_local.assert_not_called() if revision == "123": k8s_snap.ensure.assert_not_called() @@ -140,13 +351,13 @@ def test_management_installs_store_from_revision(cache, install_local, args, rev def test_version(check_output): """Test snap list returns the correct version.""" check_output.return_value = b"" - assert snap.version(snap="k8s") is None + assert snap.version(snap="k8s") == (None, False) check_output.return_value = """ Name Version Rev Tracking Publisher Notes k8s 1.30.0 1234 latest/stable canonical✓ """.encode() - assert snap.version(snap="k8s") == "1.30.0" + assert snap.version(snap="k8s") == ("1.30.0", False) check_output.side_effect = subprocess.CalledProcessError(-1, [], None, None) 
- assert snap.version(snap="k8s") is None + assert snap.version(snap="k8s") == (None, False) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 42a37073..f4a147a6 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -17,6 +17,7 @@ import pytest import pytest_asyncio import yaml +from juju.application import Application from juju.model import Model from juju.tag import untag from kubernetes import config as k8s_config @@ -27,18 +28,24 @@ from .helpers import get_unit_cidrs, is_deployed log = logging.getLogger(__name__) +TEST_DATA = Path(__file__).parent / "data" +DEFAULT_SNAP_INSTALLATION = TEST_DATA / "default-snap-installation.tar.gz" +DEFAULT_RESOURCES = {"snap-installation": None} def pytest_addoption(parser: pytest.Parser): """Parse additional pytest options. - --charm-file can be called multiple times for each - supplied charm + --charm-file can be used multiple times, specifies which local charm files are available + --upgrade-from instruct tests to start with a specific channel, and upgrade to these charms Args: parser: Pytest parser. """ parser.addoption("--charm-file", dest="charm_files", action="append", default=[]) + parser.addoption( + "--snap-installation-resource", default=str(DEFAULT_SNAP_INSTALLATION.resolve()) + ) parser.addoption("--cos", action="store_true", default=False, help="Run COS integration tests") parser.addoption( "--apply-proxy", action="store_true", default=False, help="Apply proxy to model-config" @@ -49,6 +56,9 @@ def pytest_addoption(parser: pytest.Parser): default=False, help="If cloud is LXD, use containers", ) + parser.addoption( + "--upgrade-from", dest="upgrade_from", default=None, help="Charms channel to upgrade from" + ) def pytest_configure(config): @@ -173,6 +183,28 @@ class Bundle: arch: str _content: Mapping = field(default_factory=dict) + @classmethod + async def create(cls, ops_test: OpsTest, path: Path) -> "Bundle": + """Create a bundle object. + + Args: + ops_test: Instance of the pytest-operator plugin + path: Path to the bundle file + + Returns: + Bundle: Instance of the Bundle + """ + arch = await cloud_arch(ops_test) + _type, _vms = await cloud_type(ops_test) + bundle = cls(ops_test, path, arch) + if _type == "lxd" and not _vms: + log.info("Drop lxd machine constraints") + bundle.drop_constraints() + if _type == "lxd" and _vms: + log.info("Constrain lxd machines with virt-type: virtual-machine") + bundle.add_constraints({"virt-type": "virtual-machine"}) + return bundle + @property def content(self) -> Mapping: """Yaml content of the bundle loaded into a dict""" @@ -198,16 +230,27 @@ def render(self) -> Path: yaml.safe_dump(self.content, target.open("w")) return target - def switch(self, name: str, path: Path): - """Replace charmhub application with a local charm path. + def switch(self, name: str, path: Optional[Path] = None, channel: Optional[str] = None): + """Replace charmhub application with a local charm path or specific channel. 
Args: - name (str): Which application - path (Path): Path to local charm + name (str): Which application + path (Path): Optional path to local charm + channel (str): Optional channel to use + + Raises: + ValueError: if both path and channel are provided, or neither are provided """ app = self.applications[name] - app["charm"] = str(path.resolve()) - app["channel"] = None + if (not path and not channel) or (path and channel): + raise ValueError("channel and path are mutually exclusive") + if path: + app["charm"] = str(path.resolve()) + app["channel"] = None + app["resources"] = DEFAULT_RESOURCES + if channel: + app["charm"] = name + app["channel"] = channel def drop_constraints(self): """Remove constraints on applications. Useful for testing on lxd.""" @@ -282,7 +325,7 @@ async def cloud_proxied(ops_test: OpsTest): assert ops_test.model, "Model must be present" controller = await ops_test.model.get_controller() controller_model = await controller.get_model("controller") - proxy_config_file = Path(__file__).parent / "data" / "static-proxy-config.yaml" + proxy_config_file = TEST_DATA / "static-proxy-config.yaml" proxy_configs = yaml.safe_load(proxy_config_file.read_text()) local_no_proxy = await get_unit_cidrs(controller_model, "controller", 0) no_proxy = {*proxy_configs["juju-no-proxy"], *local_no_proxy} @@ -352,7 +395,7 @@ async def deploy_model( def bundle_file(request) -> Path: - """Fixture to get bundle file. + """Helper to get bundle file. Args: request: pytest request object @@ -379,30 +422,57 @@ async def kubernetes_cluster(request: pytest.FixtureRequest, ops_test: OpsTest): yield ops_test.model return - log.info("Deploying cluster using %s bundle.", bundle_file) - arch = await cloud_arch(ops_test) + log.info("Deploying cluster using %s bundle.", bundle_path) - charm_path = ("worker/k8s", "worker") - charms = [Charm(ops_test, arch, Path("charms") / p) for p in charm_path] - charm_files = await asyncio.gather( - *[charm.resolve(request.config.option.charm_files) for charm in charms] - ) - bundle = Bundle(ops_test, bundle_path, arch) - _type, _vms = await cloud_type(ops_test) - if _type == "lxd" and not _vms: - log.info("Drop lxd machine constraints") - bundle.drop_constraints() - if _type == "lxd" and _vms: - log.info("Constrain lxd machines with virt-type: virtual-machine") - bundle.add_constraints({"virt-type": "virtual-machine"}) + bundle = await Bundle.create(ops_test, bundle_path) if request.config.option.apply_proxy: await cloud_proxied(ops_test) + + charms = [Charm(ops_test, bundle.arch, Path("charms") / p) for p in ("worker/k8s", "worker")] + charm_files_args = request.config.option.charm_files + DEFAULT_RESOURCES["snap-installation"] = request.config.option.snap_installation_resource + charm_files = await asyncio.gather(*[charm.resolve(charm_files_args) for charm in charms]) + switch_to_path = {} for path, charm in zip(charm_files, charms): - bundle.switch(charm.app_name, path) + if upgrade_channel := request.config.option.upgrade_from: + bundle.switch(charm.app_name, channel=upgrade_channel) + switch_to_path[charm.app_name] = path + else: + bundle.switch(charm.app_name, path=path) + async with deploy_model(request, ops_test, model, bundle) as the_model: + await upgrade_model(the_model, switch_to_path) yield the_model +async def upgrade_model(model: Model, switch_to_path: dict[str, Path]): + """Upgrade the model with the provided charms. 
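Condensed, the fixture above drives Bundle.switch in one of two mutually exclusive modes (the channel and charm filename are illustrative; ops_test and bundle_path come from the surrounding fixtures):

    bundle = await Bundle.create(ops_test, bundle_path)
    # with --upgrade-from, deploy the published charm first ...
    bundle.switch("k8s", channel="1.31/candidate")
    # ... otherwise deploy the locally built charm plus DEFAULT_RESOURCES
    bundle.switch("k8s", path=Path("./k8s_ubuntu-24.04-amd64.charm"))
    # passing both path and channel, or neither, raises ValueError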
+ + Args: + model: Juju model + switch_to_path: Mapping of app_name to charm + + """ + if not switch_to_path: + return + + async def _refresh(app_name: str): + """Refresh the application. + + Args: + app_name: Name of the application to refresh + """ + app: Application = model.applications[app_name] + await app.refresh(path=switch_to_path[app_name], resources=DEFAULT_RESOURCES) + + await asyncio.gather(*[_refresh(app) for app in switch_to_path]) + await model.wait_for_idle( + apps=list(switch_to_path.keys()), + status="active", + timeout=30 * 60, + ) + + @pytest_asyncio.fixture(name="_grafana_agent", scope="module") async def grafana_agent(kubernetes_cluster: Model): """Deploy Grafana Agent.""" diff --git a/tests/integration/data/default-snap-installation.tar.gz b/tests/integration/data/default-snap-installation.tar.gz new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/data/override-latest-edge.tar.gz b/tests/integration/data/override-latest-edge.tar.gz new file mode 100644 index 00000000..05013346 Binary files /dev/null and b/tests/integration/data/override-latest-edge.tar.gz differ diff --git a/tests/integration/data/test_ceph/pv-reader-pod.yaml b/tests/integration/data/test_ceph/pv-reader-pod.yaml index c87c8691..9f75c946 100755 --- a/tests/integration/data/test_ceph/pv-reader-pod.yaml +++ b/tests/integration/data/test_ceph/pv-reader-pod.yaml @@ -14,7 +14,7 @@ spec: claimName: raw-block-pvc containers: - name: pv-reader - image: busybox + image: rocks.canonical.com/cdk/busybox:1.36 command: ["/bin/sh", "-c", "cat /pvc/test_file"] volumeMounts: - name: pvc-test diff --git a/tests/integration/data/test_ceph/pv-writer-pod.yaml b/tests/integration/data/test_ceph/pv-writer-pod.yaml index 129849b5..7b659add 100755 --- a/tests/integration/data/test_ceph/pv-writer-pod.yaml +++ b/tests/integration/data/test_ceph/pv-writer-pod.yaml @@ -14,7 +14,7 @@ spec: claimName: raw-block-pvc containers: - name: pv-writer - image: busybox + image: rocks.canonical.com/cdk/busybox:1.36 command: ["/bin/sh", "-c", "echo 'PVC test data.' > /pvc/test_file"] volumeMounts: - name: pvc-test diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index c1cee35a..dae1b2ba 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -7,13 +7,14 @@ import ipaddress import json import logging +import shlex from pathlib import Path from typing import List import yaml from juju import unit from juju.model import Model -from tenacity import AsyncRetrying, retry, stop_after_attempt, wait_fixed +from tenacity import AsyncRetrying, before_sleep_log, retry, stop_after_attempt, wait_fixed log = logging.getLogger(__name__) @@ -125,10 +126,10 @@ async def ready_nodes(k8s, expected_count): async def wait_pod_phase( k8s: unit.Unit, name: str, - phase: str, + *phase: str, namespace: str = "default", retry_times: int = 30, - retry_delay_s: int = 5, + retry_delay_s: int = 15, ): """Wait for the pod to reach the specified phase (e.g. Succeeded). 
@@ -142,23 +143,34 @@ async def wait_pod_phase( """ async for attempt in AsyncRetrying( - stop=stop_after_attempt(retry_times), wait=wait_fixed(retry_delay_s) + stop=stop_after_attempt(retry_times), + wait=wait_fixed(retry_delay_s), + before_sleep=before_sleep_log(log, logging.WARNING), ): with attempt: - cmd = " ".join( + cmd = shlex.join( [ - "k8s kubectl wait", - f"--namespace {namespace}", - "--for=jsonpath='{.status.phase}'=" + phase, + "k8s", + "kubectl", + "get", + "--namespace", + namespace, + "-o", + "jsonpath={.status.phase}", f"pod/{name}", - "--timeout 1s", ] ) action = await k8s.run(cmd) result = await action.wait() - assert ( - result.results["return-code"] == 0 - ), f"Failed waiting for pod to reach {phase} phase." + stdout, stderr = ( + result.results.get(field, "").strip() for field in ["stdout", "stderr"] + ) + assert result.results["return-code"] == 0, ( + f"\nPod hasn't reached phase: {phase}\n" + f"\tstdout: '{stdout}'\n" + f"\tstderr: '{stderr}'" + ) + assert stdout in phase, f"Pod {name} not yet in phase {phase} ({stdout})" async def get_pod_logs( diff --git a/tests/integration/test_ceph.py b/tests/integration/test_ceph.py index 0bfc02d8..61bcb39f 100644 --- a/tests/integration/test_ceph.py +++ b/tests/integration/test_ceph.py @@ -37,29 +37,36 @@ async def test_ceph_sc(kubernetes_cluster: model.Model): assert "rbd.csi.ceph.com" in stdout, f"No ceph provisioner found in: {stdout}" # Copy pod definitions. - for fname in ["ceph-xfs-pvc.yaml", "pv-writer-pod.yaml", "pv-reader-pod.yaml"]: + definitions = ["ceph-xfs-pvc.yaml", "pv-writer-pod.yaml", "pv-reader-pod.yaml"] + for fname in definitions: await k8s.scp_to(_get_data_file_path(fname), f"/tmp/{fname}") - # Create "ceph-xfs" PVC. - event = await k8s.run("k8s kubectl apply -f /tmp/ceph-xfs-pvc.yaml") - result = await event.wait() - assert result.results["return-code"] == 0, "Failed to create pvc." - - # Create a pod that writes to the Ceph PV. - event = await k8s.run("k8s kubectl apply -f /tmp/pv-writer-pod.yaml") - result = await event.wait() - assert result.results["return-code"] == 0, "Failed to create writer pod." - - # Wait for the pod to exit successfully. - await helpers.wait_pod_phase(k8s, "pv-writer-test", "Succeeded") - - # Create a pod that reads the PV data and writes it to the log. - event = await k8s.run("k8s kubectl apply -f /tmp/pv-reader-pod.yaml") - result = await event.wait() - assert result.results["return-code"] == 0, "Failed to create reader pod." - - await helpers.wait_pod_phase(k8s, "pv-reader-test", "Succeeded") - - # Check the logged PV data. - logs = await helpers.get_pod_logs(k8s, "pv-reader-test") - assert "PVC test data" in logs + try: + # Create "ceph-xfs" PVC. + event = await k8s.run("k8s kubectl apply -f /tmp/ceph-xfs-pvc.yaml") + result = await event.wait() + assert result.results["return-code"] == 0, "Failed to create pvc." + + # Create a pod that writes to the Ceph PV. + event = await k8s.run("k8s kubectl apply -f /tmp/pv-writer-pod.yaml") + result = await event.wait() + assert result.results["return-code"] == 0, "Failed to create writer pod." + + # Wait for the pod to exit successfully. + await helpers.wait_pod_phase(k8s, "pv-writer-test", "Succeeded") + + # Create a pod that reads the PV data and writes it to the log. + event = await k8s.run("k8s kubectl apply -f /tmp/pv-reader-pod.yaml") + result = await event.wait() + assert result.results["return-code"] == 0, "Failed to create reader pod." 
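Because wait_pod_phase now polls kubectl get -o jsonpath and accepts a variadic set of phases, callers can treat more than one phase as acceptable; a short usage sketch (phases and retry count are illustrative):

    await helpers.wait_pod_phase(k8s, "pv-writer-test", "Succeeded")
    await helpers.wait_pod_phase(
        k8s, "pv-reader-test", "Running", "Succeeded",
        namespace="default", retry_times=10,
    )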
+ + await helpers.wait_pod_phase(k8s, "pv-reader-test", "Succeeded") + + # Check the logged PV data. + logs = await helpers.get_pod_logs(k8s, "pv-reader-test") + assert "PVC test data" in logs + finally: + # Cleanup + for fname in reversed(definitions): + event = await k8s.run(f"k8s kubectl delete -f /tmp/{fname}") + result = await event.wait() diff --git a/tests/integration/test_k8s.py b/tests/integration/test_k8s.py index c6a6da90..258c039d 100644 --- a/tests/integration/test_k8s.py +++ b/tests/integration/test_k8s.py @@ -7,8 +7,10 @@ import asyncio import logging +from pathlib import Path import pytest +import pytest_asyncio from juju import application, model from tenacity import retry, stop_after_attempt, wait_fixed @@ -19,7 +21,7 @@ log = logging.getLogger(__name__) -async def get_leader(app): +async def get_leader(app) -> int: """Find leader unit of an application. Args: @@ -27,11 +29,15 @@ async def get_leader(app): Returns: int: index to leader unit + + Raises: + ValueError: No leader found """ is_leader = await asyncio.gather(*(u.is_leader_from_status() for u in app.units)) for idx, flag in enumerate(is_leader): if flag: return idx + raise ValueError("No leader found") @pytest.mark.abort_on_fail @@ -134,6 +140,52 @@ async def test_remove_leader_control_plane(kubernetes_cluster: model.Model): await ready_nodes(follower, expected_nodes) +@pytest_asyncio.fixture() +async def override_snap_on_k8s(kubernetes_cluster: model.Model, request): + """ + Override the snap resource on a Kubernetes cluster application and revert it after the test. + + This coroutine function overrides the snap resource of the "k8s" application in the given + Kubernetes cluster with a specified override file, waits for the cluster to become idle, + and then reverts the snap resource back to its original state after the test. + + Args: + kubernetes_cluster (model.Model): The Kubernetes cluster model. + request: The pytest request object containing test configuration options. + + Yields: + The "k8s" application object after the snap resource has been overridden. + + Raises: + AssertionError: If the "k8s" application is not found in the Kubernetes cluster. + """ + k8s = kubernetes_cluster.applications["k8s"] + assert k8s, "k8s application not found" + # Override snap resource + revert = Path(request.config.option.snap_installation_resource) + override = Path(__file__).parent / "data" / "override-latest-edge.tar.gz" + + with override.open("rb") as obj: + k8s.attach_resource("snap-installation", override, obj) + await kubernetes_cluster.wait_for_idle(status="active", timeout=1 * 60) + + yield k8s + + with revert.open("rb") as obj: + k8s.attach_resource("snap-installation", revert, obj) + await kubernetes_cluster.wait_for_idle(status="active", timeout=1 * 60) + + +@pytest.mark.abort_on_fail +async def test_override_snap_resource(override_snap_on_k8s: application.Application): + """Override snap resource.""" + k8s = override_snap_on_k8s + assert k8s, "k8s application not found" + + for unit in k8s.units: + assert "Override" in unit.workload_status_message + + @pytest.mark.cos @retry(reraise=True, stop=stop_after_attempt(12), wait=wait_fixed(60)) async def test_grafana(