From 89d9dce54800800977b39184e8c359b612057ee2 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Fri, 19 Jul 2024 12:49:30 +0000 Subject: [PATCH] Refactor tests and add 1.10.1 support We're adding a rock definition for cert-manager 1.10.1. At the same time, we're refactoring the tests to use the updated test harness. --- acmesolver/1.10.1/rockcraft.yaml | 39 +++ acmesolver/{ => 1.12.2}/rockcraft.yaml | 0 cainjector/1.10.1/rockcraft.yaml | 39 +++ cainjector/{ => 1.12.2}/rockcraft.yaml | 2 +- controller/1.10.1/rockcraft.yaml | 39 +++ controller/{ => 1.12.2}/rockcraft.yaml | 0 tests/.copyright.tmpl | 2 + tests/integration/check_certmanager.py | 74 ++++++ tests/integration/config.py | 9 + tests/integration/conftest.py | 134 +--------- tests/integration/test_certmanager.py | 99 ------- tests/integration/test_certmanager_1_10_1.py | 16 ++ tests/integration/test_certmanager_1_12_2.py | 16 ++ tests/integration/test_util/config.py | 63 ----- .../integration/test_util/harness/__init__.py | 18 -- tests/integration/test_util/harness/base.py | 106 -------- tests/integration/test_util/harness/juju.py | 203 -------------- tests/integration/test_util/harness/local.py | 77 ------ tests/integration/test_util/harness/lxd.py | 179 ------------- .../test_util/harness/multipass.py | 134 ---------- tests/integration/test_util/util.py | 247 ------------------ tests/lxd-profile.yaml | 105 -------- tests/requirements-test.txt | 1 + tests/sanity/test_acmesolver.py | 37 ++- tests/sanity/test_cainjector.py | 34 ++- tests/sanity/test_controller.py | 29 +- tests/sanity/test_webhook.py | 32 ++- tests/templates/bootstrap-session.yaml | 7 - tests/tox.ini | 33 ++- webhook/1.10.1/rockcraft.yaml | 39 +++ webhook/{ => 1.12.2}/rockcraft.yaml | 0 31 files changed, 380 insertions(+), 1433 deletions(-) create mode 100644 acmesolver/1.10.1/rockcraft.yaml rename acmesolver/{ => 1.12.2}/rockcraft.yaml (100%) create mode 100644 cainjector/1.10.1/rockcraft.yaml rename cainjector/{ => 1.12.2}/rockcraft.yaml (98%) create mode 100644 controller/1.10.1/rockcraft.yaml rename controller/{ => 1.12.2}/rockcraft.yaml (100%) create mode 100644 tests/.copyright.tmpl create mode 100644 tests/integration/check_certmanager.py create mode 100644 tests/integration/config.py delete mode 100644 tests/integration/test_certmanager.py create mode 100644 tests/integration/test_certmanager_1_10_1.py create mode 100644 tests/integration/test_certmanager_1_12_2.py delete mode 100644 tests/integration/test_util/config.py delete mode 100644 tests/integration/test_util/harness/__init__.py delete mode 100644 tests/integration/test_util/harness/base.py delete mode 100644 tests/integration/test_util/harness/juju.py delete mode 100644 tests/integration/test_util/harness/local.py delete mode 100644 tests/integration/test_util/harness/lxd.py delete mode 100644 tests/integration/test_util/harness/multipass.py delete mode 100644 tests/integration/test_util/util.py delete mode 100644 tests/lxd-profile.yaml delete mode 100644 tests/templates/bootstrap-session.yaml create mode 100644 webhook/1.10.1/rockcraft.yaml rename webhook/{ => 1.12.2}/rockcraft.yaml (100%) diff --git a/acmesolver/1.10.1/rockcraft.yaml b/acmesolver/1.10.1/rockcraft.yaml new file mode 100644 index 0000000..7f6d80f --- /dev/null +++ b/acmesolver/1.10.1/rockcraft.yaml @@ -0,0 +1,39 @@ +name: cert-manager-acmesolver +summary: ROCK for the cert-manager-acmesolver Project. +description: | + This ROCK is a drop-in replacement for the autoscaling/cert-manager-acmesolver image. 
+version: "1.10.1" +license: Apache-2.0 + +base: bare +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +run-user: _daemon_ +entrypoint-service: cert-manager-acmesolver +services: + cert-manager-acmesolver: + override: replace + summary: "cert-manager-acmesolver service" + startup: enabled + command: "/acmesolver-linux [ -h ]" + on-failure: shutdown + +parts: + cert-manager-acmesolver: + plugin: nil + source: https://github.com/cert-manager/cert-manager.git + source-type: git + source-tag: v1.10.1 + source-depth: 1 + build-snaps: + - jq + - go/1.22/stable + override-build: | + # CTR=echo is hacky way of passing docker check not required for build + make CTR=echo _bin/server/acmesolver-linux-${CRAFT_PLATFORM} + cp _bin/server/acmesolver-linux-${CRAFT_PLATFORM} ${CRAFT_PART_INSTALL}/acmesolver-linux + prime: + - acmesolver-linux diff --git a/acmesolver/rockcraft.yaml b/acmesolver/1.12.2/rockcraft.yaml similarity index 100% rename from acmesolver/rockcraft.yaml rename to acmesolver/1.12.2/rockcraft.yaml diff --git a/cainjector/1.10.1/rockcraft.yaml b/cainjector/1.10.1/rockcraft.yaml new file mode 100644 index 0000000..788b537 --- /dev/null +++ b/cainjector/1.10.1/rockcraft.yaml @@ -0,0 +1,39 @@ +name: cert-manager-cainjector +summary: ROCK for the cert-manager-cainjector Project. +description: | + This ROCK is a drop-in replacement for the autoscaling/cert-manager-cainjector image. +version: 1.10.1 +license: Apache-2.0 + +base: bare +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +run-user: _daemon_ +entrypoint-service: cert-manager-cainjector +services: + cert-manager-cainjector: + override: replace + summary: "cert-manager-cainjector service" + startup: enabled + command: "/cainjector-linux [ -h ]" + on-failure: shutdown + +parts: + cert-manager-cainjector: + plugin: nil + source: https://github.com/cert-manager/cert-manager.git + source-type: git + source-tag: v1.10.1 + source-depth: 1 + build-snaps: + - jq + - go/1.22/stable + override-build: | + # CTR=echo is hacky way of passing docker check not required for build + make CTR=echo _bin/server/cainjector-linux-${CRAFT_PLATFORM} + cp _bin/server/cainjector-linux-${CRAFT_PLATFORM} ${CRAFT_PART_INSTALL}/cainjector-linux + prime: + - cainjector-linux diff --git a/cainjector/rockcraft.yaml b/cainjector/1.12.2/rockcraft.yaml similarity index 98% rename from cainjector/rockcraft.yaml rename to cainjector/1.12.2/rockcraft.yaml index 6415f9f..967c95d 100644 --- a/cainjector/rockcraft.yaml +++ b/cainjector/1.12.2/rockcraft.yaml @@ -1,6 +1,6 @@ name: cert-manager-cainjector summary: ROCK for the cert-manager-cainjector Project. -description: | +description: | This ROCK is a drop-in replacement for the autoscaling/cert-manager-cainjector image. version: "1.12.2" license: Apache-2.0 diff --git a/controller/1.10.1/rockcraft.yaml b/controller/1.10.1/rockcraft.yaml new file mode 100644 index 0000000..072b7ae --- /dev/null +++ b/controller/1.10.1/rockcraft.yaml @@ -0,0 +1,39 @@ +name: cert-manager-controller +summary: ROCK for the cert-manager-controller Project. +description: | + This ROCK is a drop-in replacement for the autoscaling/cert-manager-controller image. 
+version: 1.10.1
+license: Apache-2.0
+
+base: bare
+build-base: ubuntu@22.04
+platforms:
+  amd64:
+  arm64:
+
+run-user: _daemon_
+entrypoint-service: cert-manager-controller
+services:
+  cert-manager-controller:
+    override: replace
+    summary: "cert-manager-controller service"
+    startup: enabled
+    command: "/controller-linux [ -h ]"
+    on-failure: shutdown
+
+parts:
+  cert-manager-controller:
+    plugin: nil
+    source: https://github.com/cert-manager/cert-manager.git
+    source-type: git
+    source-tag: v1.10.1
+    source-depth: 1
+    build-snaps:
+      - jq
+      - go/1.22/stable
+    override-build: |
+      # CTR=echo is hacky way of passing docker check not required for build
+      make CTR=echo _bin/server/controller-linux-${CRAFT_PLATFORM}
+      cp _bin/server/controller-linux-${CRAFT_PLATFORM} ${CRAFT_PART_INSTALL}/controller-linux
+    prime:
+      - controller-linux
diff --git a/controller/rockcraft.yaml b/controller/1.12.2/rockcraft.yaml
similarity index 100%
rename from controller/rockcraft.yaml
rename to controller/1.12.2/rockcraft.yaml
diff --git a/tests/.copyright.tmpl b/tests/.copyright.tmpl
new file mode 100644
index 0000000..d04ad9b
--- /dev/null
+++ b/tests/.copyright.tmpl
@@ -0,0 +1,2 @@
+Copyright ${years} ${owner}.
+See LICENSE file for licensing details
diff --git a/tests/integration/check_certmanager.py b/tests/integration/check_certmanager.py
new file mode 100644
index 0000000..f23f0d9
--- /dev/null
+++ b/tests/integration/check_certmanager.py
@@ -0,0 +1,74 @@
+#
+# Copyright 2024 Canonical, Ltd.
+# See LICENSE file for licensing details
+#
+
+from pathlib import Path
+
+from k8s_test_harness import harness
+from k8s_test_harness.util import constants, env_util, exec_util, k8s_util
+from k8s_test_harness.util.k8s_util import HelmImage
+
+from config import MANIFESTS_DIR
+
+IMG_PLATFORM = "amd64"
+INSTALL_NAME = "cert-manager"
+
+
+def _get_rock_image(name: str, version: str):
+    rock = env_util.get_build_meta_info_for_rock_version(
+        f"cert-manager-{name}", version, IMG_PLATFORM
+    )
+    return rock.image
+
+
+def check_certmanager(module_instance: harness.Instance,
+                      img_version: str, chart_version: str):
+    images = [
+        HelmImage(uri=_get_rock_image("controller", img_version)),
+        HelmImage(uri=_get_rock_image("webhook", img_version),
+                  prefix="webhook"),
+        HelmImage(uri=_get_rock_image("cainjector", img_version),
+                  prefix="cainjector"),
+        HelmImage(uri=_get_rock_image("acmesolver", img_version),
+                  prefix="acmesolver"),
+    ]
+
+    helm_command = k8s_util.get_helm_install_command(
+        name=INSTALL_NAME,
+        chart_name="cert-manager",
+        images=images,
+        namespace=constants.K8S_NS_KUBE_SYSTEM,
+        chart_version=chart_version,
+        repository="https://charts.jetstack.io",
+    )
+    helm_command += ["--set", "installCRDs=true"]
+    module_instance.exec(helm_command)
+
+    manifest = MANIFESTS_DIR / "cert-manager-test.yaml"
+    module_instance.exec(
+        ["k8s", "kubectl", "apply", "-f", "-"],
+        input=Path(manifest).read_bytes(),
+    )
+
+    k8s_util.wait_for_resource(
+        module_instance,
+        resource_type="certificate",
+        name="selfsigned-cert",
+        namespace="cert-manager-test",
+    )
+
+    exec_util.stubbornly(retries=5, delay_s=10).on(module_instance).until(
+        lambda p: "selfsigned-cert-tls" in p.stdout.decode()
+    ).exec(
+        [
+            "k8s",
+            "kubectl",
+            "get",
+            "secret",
+            "--namespace",
+            "cert-manager-test",
+            "-o",
+            "json",
+        ]
+    )
diff --git a/tests/integration/config.py b/tests/integration/config.py
new file mode 100644
index 0000000..dd26eb0
--- /dev/null
+++
b/tests/integration/config.py @@ -0,0 +1,9 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import os +from pathlib import Path + +DIR = Path(__file__).absolute().parent + +MANIFESTS_DIR = DIR / ".." / "templates" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 05f2707..1ca658c 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,135 +1,5 @@ # # Copyright 2024 Canonical, Ltd. +# See LICENSE file for licensing details # -import logging -from pathlib import Path -from typing import Generator, List - -import pytest -from test_util import config, harness, util - -LOG = logging.getLogger(__name__) - - -def _harness_clean(h: harness.Harness): - "Clean up created instances within the test harness." - - if config.SKIP_CLEANUP: - LOG.warning( - "Skipping harness cleanup. " - "It is your job now to clean up cloud resources" - ) - else: - LOG.debug("Cleanup") - h.cleanup() - - -@pytest.fixture(scope="session") -def h() -> harness.Harness: - LOG.debug("Create harness for %s", config.SUBSTRATE) - if config.SUBSTRATE == "local": - h = harness.LocalHarness() - elif config.SUBSTRATE == "lxd": - h = harness.LXDHarness() - elif config.SUBSTRATE == "multipass": - h = harness.MultipassHarness() - elif config.SUBSTRATE == "juju": - h = harness.JujuHarness() - else: - raise harness.HarnessError( - "TEST_SUBSTRATE must be one of: local, lxd, multipass, juju" - ) - - yield h - - _harness_clean(h) - - -def pytest_configure(config): - config.addinivalue_line( - "markers", - "node_count: Mark a test to specify how many instance nodes need to be created\n" - "disable_k8s_bootstrapping: By default, the first k8s node is bootstrapped. This marker disables that.", - ) - - -@pytest.fixture(scope="function") -def node_count(request) -> int: - node_count_marker = request.node.get_closest_marker("node_count") - if not node_count_marker: - return 1 - node_count_arg, *_ = node_count_marker.args - return int(node_count_arg) - - -@pytest.fixture(scope="function") -def disable_k8s_bootstrapping(request) -> int: - return bool(request.node.get_closest_marker("disable_k8s_bootstrapping")) - - -@pytest.fixture(scope="function") -def instances( - h: harness.Harness, node_count: int, tmp_path: Path, disable_k8s_bootstrapping: bool -) -> Generator[List[harness.Instance], None, None]: - """Construct instances for a cluster. - - Bootstrap and setup networking on the first instance, if `disable_k8s_bootstrapping` marker is not set. - """ - if not config.SNAP_CHANNEL: - pytest.fail("Set TEST_SNAP_CHANNEL to the channel of the k8s snap to install.") - - if node_count <= 0: - pytest.xfail("Test requested 0 or fewer instances, skip this test.") - - LOG.info(f"Creating {node_count} instances") - instances: List[harness.Instance] = [] - - for _ in range(node_count): - # Create instances and setup the k8s snap in each. - instance = h.new_instance() - instances.append(instance) - util.setup_k8s_snap(instance) - - if not disable_k8s_bootstrapping: - first_node, *_ = instances - first_node.exec(["k8s", "bootstrap"]) - - yield instances - - if config.SKIP_CLEANUP: - LOG.warning("Skipping clean-up of instances, delete them on your own") - return - - # Cleanup after each test. - # We cannot execute _harness_clean() here as this would also - # remove the session_instance. The harness ensures that everything is cleaned up - # at the end of the test session. 
- for instance in instances: - h.delete_instance(instance.id) - - -@pytest.fixture(scope="session") -def session_instance( - h: harness.Harness, tmp_path_factory: pytest.TempPathFactory -) -> Generator[harness.Instance, None, None]: - """Constructs and bootstraps an instance that persists over a test session. - - Bootstraps the instance with all k8sd features enabled to reduce testing time. - """ - LOG.info("Setup node and enable all features") - - instance = h.new_instance() - util.setup_k8s_snap(instance) - - bootstrap_config_path = "/home/ubuntu/bootstrap-session.yaml" - instance.send_file( - (config.MANIFESTS_DIR / "bootstrap-session.yaml").as_posix(), - bootstrap_config_path, - ) - - instance.exec(["k8s", "bootstrap", "--file", bootstrap_config_path]) - util.wait_until_k8s_ready(instance, [instance]) - util.wait_for_network(instance) - util.wait_for_dns(instance) - - yield instance +pytest_plugins = ["k8s_test_harness.plugin"] diff --git a/tests/integration/test_certmanager.py b/tests/integration/test_certmanager.py deleted file mode 100644 index f59183b..0000000 --- a/tests/integration/test_certmanager.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -import logging -from pathlib import Path -import os - -from test_util import harness, util -from test_util.config import MANIFESTS_DIR - -LOG = logging.getLogger(__name__) - - -def test_integration_certmanager(session_instance: harness.Instance): - images = [ - {"variable": "ROCK_CERT_MANAGER_CONTROLLER", "prefix": None}, - {"variable": "ROCK_CERT_MANAGER_WEBHOOK", "prefix": "webhook"}, - {"variable": "ROCK_CERT_MANAGER_CAINJECTOR", "prefix": "cainjector"}, - {"variable": "ROCK_CERT_MANAGER_ACMESOLVER", "prefix": "acmesolver"}, - ] - - helm_command = [ - "k8s", - "helm", - "install", - "cert-manager", - "--repo", - "https://charts.jetstack.io", - "cert-manager", - "--namespace", - "cert-manager", - "--create-namespace", - "--version", - "v1.12.2", - "--set", - "installCRDs=true", - ] - - for image in images: - image_uri = os.getenv(image["variable"]) - assert image_uri is not None, f"{image['variable']} is not set" - image_split = image_uri.split(":") - - if image["prefix"]: - helm_command += [ - "--set", - f"{image['prefix']}.image.repository={image_split[0]}", - "--set", - f"{image['prefix']}.image.tag={image_split[1]}", - "--set", - f"{image['prefix']}.securityContext.runAsUser=584792", - ] - else: - helm_command += [ - "--set", - f"image.repository={image_split[0]}", - "--set", - f"image.tag={image_split[1]}", - "--set", - "securityContext.runAsUser=584792", - ] - - session_instance.exec(helm_command) - - manifest = MANIFESTS_DIR / "cert-manager-test.yaml" - session_instance.exec( - ["k8s", "kubectl", "apply", "-f", "-"], - input=Path(manifest).read_bytes(), - ) - - util.stubbornly(retries=3, delay_s=1).on(session_instance).exec( - [ - "k8s", - "kubectl", - "wait", - "--for=condition=ready", - "certificate", - "selfsigned-cert", - "--namespace", - "cert-manager-test", - "--timeout", - "180s", - ] - ) - - util.stubbornly(retries=5, delay_s=10).on(session_instance).until( - lambda p: "selfsigned-cert-tls" in p.stdout.decode() - ).exec( - [ - "k8s", - "kubectl", - "get", - "secret", - "--namespace", - "cert-manager-test", - "-o", - "json", - ] - ) diff --git a/tests/integration/test_certmanager_1_10_1.py b/tests/integration/test_certmanager_1_10_1.py new file mode 100644 index 0000000..c73e5a4 --- /dev/null +++ b/tests/integration/test_certmanager_1_10_1.py @@ -0,0 +1,16 @@ +# +# Copyright 2024 Canonical, Ltd. 
+# See LICENSE file for licensing details
+#
+
+import check_certmanager
+
+IMG_VERSION = "1.10.1"
+CHART_VERSION = IMG_VERSION
+
+
+# Our k8s harness is a module scoped fixture, so each rock version
+# needs to be tested in a separate module in order to have a
+# clean, isolated k8s environment.
+def test_certmanager_integration(module_instance):
+    check_certmanager.check_certmanager(module_instance, IMG_VERSION, CHART_VERSION)
diff --git a/tests/integration/test_certmanager_1_12_2.py b/tests/integration/test_certmanager_1_12_2.py
new file mode 100644
index 0000000..8086592
--- /dev/null
+++ b/tests/integration/test_certmanager_1_12_2.py
@@ -0,0 +1,16 @@
+#
+# Copyright 2024 Canonical, Ltd.
+# See LICENSE file for licensing details
+#
+
+import check_certmanager
+
+IMG_VERSION = "1.12.2"
+CHART_VERSION = IMG_VERSION
+
+
+# Our k8s harness is a module scoped fixture, so each rock version
+# needs to be tested in a separate module in order to have a
+# clean, isolated k8s environment.
+def test_certmanager_integration(module_instance):
+    check_certmanager.check_certmanager(module_instance, IMG_VERSION, CHART_VERSION)
diff --git a/tests/integration/test_util/config.py b/tests/integration/test_util/config.py
deleted file mode 100644
index 2fcc7f6..0000000
--- a/tests/integration/test_util/config.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# Copyright 2024 Canonical, Ltd.
-#
-import os
-from pathlib import Path
-
-DIR = Path(__file__).absolute().parent
-
-MANIFESTS_DIR = DIR / ".." / ".." / "templates"
-
-# SNAP is the absolute path to the snap against which we run the integration tests.
-SNAP_CHANNEL = os.getenv("TEST_SNAP_CHANNEL")
-
-# SUBSTRATE is the substrate to use for running the integration tests.
-# One of 'local' (default), 'lxd', 'juju', or 'multipass'.
-SUBSTRATE = os.getenv("TEST_SUBSTRATE") or "local"
-
-# SKIP_CLEANUP can be used to prevent machines to be automatically destroyed
-# after the tests complete.
-SKIP_CLEANUP = (os.getenv("TEST_SKIP_CLEANUP") or "") == "1"
-
-# LXD_PROFILE_NAME is the profile name to use for LXD containers.
-LXD_PROFILE_NAME = os.getenv("TEST_LXD_PROFILE_NAME") or "k8s-integration"
-
-# LXD_PROFILE is the profile to use for LXD containers.
-LXD_PROFILE = (
-    os.getenv("TEST_LXD_PROFILE")
-    or (DIR / ".." / ".." / "lxd-profile.yaml").read_text()
-)
-
-# LXD_IMAGE is the image to use for LXD containers.
-LXD_IMAGE = os.getenv("TEST_LXD_IMAGE") or "ubuntu:22.04"
-
-# LXD_SIDELOAD_IMAGES_DIR is an optional directory with OCI images from the host
-# that will be mounted at /var/snap/k8s/common/images on the LXD containers.
-LXD_SIDELOAD_IMAGES_DIR = os.getenv("TEST_LXD_SIDELOAD_IMAGES_DIR") or ""
-
-# MULTIPASS_IMAGE is the image to use for Multipass VMs.
-MULTIPASS_IMAGE = os.getenv("TEST_MULTIPASS_IMAGE") or "22.04"
-
-# MULTIPASS_CPUS is the number of cpus for Multipass VMs.
-MULTIPASS_CPUS = os.getenv("TEST_MULTIPASS_CPUS") or "2"
-
-# MULTIPASS_MEMORY is the memory for Multipass VMs.
-MULTIPASS_MEMORY = os.getenv("TEST_MULTIPASS_MEMORY") or "2G"
-
-# MULTIPASS_DISK is the disk size for Multipass VMs.
-MULTIPASS_DISK = os.getenv("TEST_MULTIPASS_DISK") or "10G"
-
-# JUJU_MODEL is the Juju model to use.
-JUJU_MODEL = os.getenv("TEST_JUJU_MODEL")
-
-# JUJU_CONTROLLER is the Juju controller to use.
-JUJU_CONTROLLER = os.getenv("TEST_JUJU_CONTROLLER")
-
-# JUJU_CONSTRAINTS is the constraints to use when creating Juju machines.
-JUJU_CONSTRAINTS = os.getenv("TEST_JUJU_CONSTRAINTS", "mem=4G cores=2 root-disk=20G")
-
-# JUJU_BASE is the base OS to use when creating Juju machines.
-JUJU_BASE = os.getenv("TEST_JUJU_BASE") or "ubuntu@22.04" - -# JUJU_MACHINES is a list of existing Juju machines to use. -JUJU_MACHINES = os.getenv("TEST_JUJU_MACHINES") or "" diff --git a/tests/integration/test_util/harness/__init__.py b/tests/integration/test_util/harness/__init__.py deleted file mode 100644 index 1aa0c6f..0000000 --- a/tests/integration/test_util/harness/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -from test_util.harness.base import Harness, HarnessError, Instance -from test_util.harness.juju import JujuHarness -from test_util.harness.local import LocalHarness -from test_util.harness.lxd import LXDHarness -from test_util.harness.multipass import MultipassHarness - -__all__ = [ - HarnessError, - Harness, - Instance, - JujuHarness, - LocalHarness, - LXDHarness, - MultipassHarness, -] diff --git a/tests/integration/test_util/harness/base.py b/tests/integration/test_util/harness/base.py deleted file mode 100644 index 81a969a..0000000 --- a/tests/integration/test_util/harness/base.py +++ /dev/null @@ -1,106 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -import subprocess -from functools import partial - - -class HarnessError(Exception): - """Base error for all our harness failures""" - - pass - - -class Instance: - """Reference to a harness and a given instance id. - - Provides convenience methods for an instance to call its harness' methods - """ - - def __init__(self, h: "Harness", id: str) -> None: - self._h = h - self._id = id - - self.send_file = partial(h.send_file, id) - self.pull_file = partial(h.pull_file, id) - self.exec = partial(h.exec, id) - self.delete_instance = partial(h.delete_instance, id) - - @property - def id(self) -> str: - return self._id - - def __str__(self) -> str: - return f"{self._h.name}:{self.id}" - - -class Harness: - """Abstract how integration tests can start and manage multiple machines. This allows - writing integration tests that can run on the local machine, LXD, or Multipass with minimum - effort. - """ - - name: str - - def new_instance(self) -> Instance: - """Creates a new instance on the infrastructure and returns an object - which can be used to interact with it. - - If the operation fails, a HarnessError is raised. - """ - raise NotImplementedError - - def send_file(self, instance_id: str, source: str, destination: str): - """Send a local file to the instance. - - :param instance_id: The instance_id, as returned by new_instance() - :param source: Path to the file that will be copied to the instance - :param destination: Path in the instance where the file will be copied. - This must always be an absolute path. - - - If the operation fails, a HarnessError is raised. - """ - raise NotImplementedError - - def pull_file(self, instance_id: str, source: str, destination: str): - """Pull a file from the instance and save it on the local machine - - :param instance_id: The instance_id, as returned by new_instance() - :param source: Path to the file that will be copied from the instance. - This must always be an absolute path. - :param destination: Path on the local machine the file will be saved. - - If the operation fails, a HarnessError is raised. - """ - raise NotImplementedError - - def exec( - self, instance_id: str, command: list, **kwargs - ) -> subprocess.CompletedProcess: - """Run a command as root on the instance. 
- - :param instance_id: The instance_id, as returned by new_instance() - :param command: Command for subprocess.run() - :param kwargs: Keyword args compatible with subprocess.run() - - If the operation fails, a subprocesss.CalledProcessError is raised. - """ - raise NotImplementedError - - def delete_instance(self, instance_id: str): - """Delete a previously created instance. - - :param instance_id: The instance_id, as returned by new_instance() - - If the operation fails, a HarnessError is raised. - """ - raise NotImplementedError - - def cleanup(self): - """Delete any leftover resources after the tests are done, e.g. delete any - instances that might still be running. - - If the operation fails, a HarnessError is raised. - """ - raise NotImplementedError diff --git a/tests/integration/test_util/harness/juju.py b/tests/integration/test_util/harness/juju.py deleted file mode 100644 index 4d3a02b..0000000 --- a/tests/integration/test_util/harness/juju.py +++ /dev/null @@ -1,203 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -import json -import logging -import shlex -import subprocess -from pathlib import Path - -from test_util import config -from test_util.harness import Harness, HarnessError, Instance -from test_util.util import run - -LOG = logging.getLogger(__name__) - - -class JujuHarness(Harness): - """A Harness that creates an Juju machine for each instance.""" - - name = "juju" - - def __init__(self): - super(JujuHarness, self).__init__() - - self.model = config.JUJU_MODEL - if not self.model: - raise HarnessError("Set JUJU_MODEL to the Juju model to use") - - if config.JUJU_CONTROLLER: - self.model = f"{config.JUJU_CONTROLLER}:{self.model}" - - self.constraints = config.JUJU_CONSTRAINTS - self.base = config.JUJU_BASE - self.existing_machines = {} - self.instances = set() - - if config.JUJU_MACHINES: - self.existing_machines = { - instance_id.strip(): False - for instance_id in config.JUJU_MACHINES.split() - } - LOG.debug( - "Configured Juju substrate (model %s, machines %s)", - self.model, - config.JUJU_MACHINES, - ) - - else: - LOG.debug( - "Configured Juju substrate (model %s, base %s, constraints %s)", - self.model, - self.base, - self.constraints, - ) - - def new_instance(self) -> Instance: - for instance_id in self.existing_machines: - if not self.existing_machines[instance_id]: - LOG.debug("Reusing existing machine %s", instance_id) - self.existing_machines[instance_id] = True - self.instances.add(instance_id) - return Instance(self, instance_id) - - LOG.debug("Creating instance with constraints %s", self.constraints) - try: - p = run( - [ - "juju", - "add-machine", - "-m", - self.model, - "--constraints", - self.constraints, - "--base", - self.base, - ], - capture_output=True, - ) - - output = p.stderr.decode().strip() - if not output.startswith("created machine "): - raise HarnessError(f"failed to parse output from juju add-machine {p=}") - - instance_id = output.split(" ")[2] - except subprocess.CalledProcessError as e: - raise HarnessError("Failed to create Juju machine") from e - - self.instances.add(instance_id) - - self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) - return Instance(self, instance_id) - - def send_file(self, instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(destination).is_absolute(): - raise HarnessError(f"path {destination} must be absolute") - - LOG.debug( - "Copying file %s to instance %s at %s", source, instance_id, 
destination - ) - try: - self.exec( - instance_id, - ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], - ) - run(["juju", "scp", source, f"{instance_id}:{destination}"]) - except subprocess.CalledProcessError as e: - raise HarnessError("juju scp command failed") from e - - def pull_file(self, instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(source).is_absolute(): - raise HarnessError(f"path {source} must be absolute") - - LOG.debug( - "Copying file %s from instance %s to %s", source, instance_id, destination - ) - try: - run(["juju", "scp", f"{instance_id}:{source}", destination]) - except subprocess.CalledProcessError as e: - raise HarnessError("juju scp command failed") from e - - def exec(self, instance_id: str, command: list, **kwargs): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - LOG.debug("Execute command %s in instance %s", command, instance_id) - capture_output = kwargs.pop("capture_output", False) - check = kwargs.pop("check", True) - stdout = kwargs.pop("stdout", None) - stderr = kwargs.pop("stderr", None) - input = f" < Instance: - if self.initialized: - raise HarnessError("local substrate only supports up to one instance") - - self.initialized = True - LOG.debug("Initializing instance") - try: - self.exec(self.hostname, ["snap", "wait", "system", "seed.loaded"]) - except subprocess.CalledProcessError as e: - raise HarnessError("failed to wait for snapd seed") from e - - return Instance(self, self.hostname) - - def send_file(self, _: str, source: str, destination: str): - if not self.initialized: - raise HarnessError("no instance initialized") - - if not Path(destination).is_absolute(): - raise HarnessError(f"path {destination} must be absolute") - - LOG.debug("Copying file %s to %s", source, destination) - try: - self.exec( - _, ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()] - ) - shutil.copy(source, destination) - except subprocess.CalledProcessError as e: - raise HarnessError("failed to copy file") from e - except shutil.SameFileError: - pass - - def pull_file(self, _: str, source: str, destination: str): - return self.send_file(_, destination, source) - - def exec(self, _: str, command: list, **kwargs): - if not self.initialized: - raise HarnessError("no instance initialized") - - LOG.debug("Executing command %s on %s", command, self.hostname) - return run(["sudo", "-E", "bash", "-c", shlex.join(command)], **kwargs) - - def delete_instance(self, _: str): - LOG.debug("Stopping instance") - self.initialized = False - - def cleanup(self): - LOG.debug("Stopping instance") - self.initialized = False diff --git a/tests/integration/test_util/harness/lxd.py b/tests/integration/test_util/harness/lxd.py deleted file mode 100644 index a5aaebd..0000000 --- a/tests/integration/test_util/harness/lxd.py +++ /dev/null @@ -1,179 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. 
-# -import logging -import os -import shlex -import subprocess -from pathlib import Path - -from test_util import config -from test_util.harness import Harness, HarnessError, Instance -from test_util.util import run, stubbornly - -LOG = logging.getLogger(__name__) - - -class LXDHarness(Harness): - """A Harness that creates an LXD container for each instance.""" - - name = "lxd" - - def next_id(self) -> int: - self._next_id += 1 - return self._next_id - - def __init__(self): - super(LXDHarness, self).__init__() - - self._next_id = 0 - - self.profile = config.LXD_PROFILE_NAME - self.sideload_images_dir = config.LXD_SIDELOAD_IMAGES_DIR - self.image = config.LXD_IMAGE - self.instances = set() - - LOG.debug("Checking for LXD profile %s", self.profile) - try: - run(["lxc", "profile", "show", self.profile]) - except subprocess.CalledProcessError: - try: - LOG.debug("Creating LXD profile %s", self.profile) - run(["lxc", "profile", "create", self.profile]) - - except subprocess.CalledProcessError as e: - raise HarnessError( - f"Failed to create LXD profile {self.profile}" - ) from e - - try: - LOG.debug("Configuring LXD profile %s", self.profile) - run( - ["lxc", "profile", "edit", self.profile], - input=config.LXD_PROFILE.encode(), - ) - except subprocess.CalledProcessError as e: - raise HarnessError(f"Failed to configure LXD profile {self.profile}") from e - - LOG.debug( - "Configured LXD substrate (profile %s, image %s)", self.profile, self.image - ) - - def new_instance(self) -> Instance: - instance_id = f"k8s-integration-{os.urandom(3).hex()}-{self.next_id()}" - - LOG.debug("Creating instance %s with image %s", instance_id, self.image) - try: - stubbornly(retries=3, delay_s=1).exec( - [ - "lxc", - "launch", - self.image, - instance_id, - "-p", - "default", - "-p", - self.profile, - ] - ) - self.instances.add(instance_id) - - if self.sideload_images_dir: - stubbornly(retries=3, delay_s=1).exec( - [ - "lxc", - "config", - "device", - "add", - instance_id, - "k8s-e2e-images", - "disk", - f"source={self.sideload_images_dir}", - "path=/mnt/images", - "readonly=true", - ] - ) - - self.exec( - instance_id, - ["mkdir", "-p", "/var/snap/k8s/common"], - ) - self.exec( - instance_id, - ["cp", "-rv", "/mnt/images", "/var/snap/k8s/common/images"], - ) - except subprocess.CalledProcessError as e: - raise HarnessError(f"Failed to create LXD container {instance_id}") from e - - self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) - return Instance(self, instance_id) - - def send_file(self, instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(destination).is_absolute(): - raise HarnessError(f"path {destination} must be absolute") - - LOG.debug( - "Copying file %s to instance %s at %s", source, instance_id, destination - ) - try: - self.exec( - instance_id, - ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], - capture_output=True, - ) - run( - ["lxc", "file", "push", source, f"{instance_id}{destination}"], - capture_output=True, - ) - except subprocess.CalledProcessError as e: - LOG.error("command {e.cmd} failed") - LOG.error(f" {e.returncode=}") - LOG.error(f" {e.stdout.decode()=}") - LOG.error(f" {e.stderr.decode()=}") - raise HarnessError("failed to push file") from e - - def pull_file(self, instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(source).is_absolute(): 
- raise HarnessError(f"path {source} must be absolute") - - LOG.debug( - "Copying file %s from instance %s to %s", source, instance_id, destination - ) - try: - run( - ["lxc", "file", "pull", f"{instance_id}{source}", destination], - stdout=subprocess.DEVNULL, - ) - except subprocess.CalledProcessError as e: - raise HarnessError("lxc file push command failed") from e - - def exec(self, instance_id: str, command: list, **kwargs): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - LOG.debug("Execute command %s in instance %s", command, instance_id) - return run( - ["lxc", "shell", instance_id, "--", "bash", "-c", shlex.join(command)], - **kwargs, - ) - - def delete_instance(self, instance_id: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - try: - run(["lxc", "rm", instance_id, "--force"]) - except subprocess.CalledProcessError as e: - raise HarnessError(f"failed to delete instance {instance_id}") from e - - self.instances.discard(instance_id) - - def cleanup(self): - for instance_id in self.instances.copy(): - self.delete_instance(instance_id) diff --git a/tests/integration/test_util/harness/multipass.py b/tests/integration/test_util/harness/multipass.py deleted file mode 100644 index a98df7e..0000000 --- a/tests/integration/test_util/harness/multipass.py +++ /dev/null @@ -1,134 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -import logging -import os -import shlex -import subprocess -from pathlib import Path - -from test_util import config -from test_util.harness import Harness, HarnessError, Instance -from test_util.util import run - -LOG = logging.getLogger(__name__) - - -class MultipassHarness(Harness): - """A Harness that creates a Multipass VM for each instance.""" - - name = "multipass" - - def next_id(self) -> int: - self._next_id += 1 - return self._next_id - - def __init__(self): - super(MultipassHarness, self).__init__() - - self._next_id = 0 - - self.image = config.MULTIPASS_IMAGE - self.cpus = config.MULTIPASS_CPUS - self.memory = config.MULTIPASS_MEMORY - self.disk = config.MULTIPASS_DISK - self.instances = set() - - LOG.debug("Configured Multipass substrate (image %s)", self.image) - - def new_instance(self) -> Instance: - instance_id = f"k8s-integration-{os.urandom(3).hex()}-{self.next_id()}" - - LOG.debug("Creating instance %s with image %s", instance_id, self.image) - try: - run( - [ - "multipass", - "launch", - self.image, - "--name", - instance_id, - "--cpus", - self.cpus, - "--memory", - self.memory, - "--disk", - self.disk, - ] - ) - except subprocess.CalledProcessError as e: - raise HarnessError(f"Failed to create multipass VM {instance_id}") from e - - self.instances.add(instance_id) - - self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) - return Instance(self, instance_id) - - def send_file(self, instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(destination).is_absolute(): - raise HarnessError(f"path {destination} must be absolute") - - LOG.debug( - "Copying file %s to instance %s at %s", source, instance_id, destination - ) - try: - self.exec( - instance_id, - ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], - ) - run(["multipass", "transfer", source, f"{instance_id}:{destination}"]) - except subprocess.CalledProcessError as e: - raise HarnessError("lxc file push command failed") from e - - def pull_file(self, 
instance_id: str, source: str, destination: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - if not Path(source).is_absolute(): - raise HarnessError(f"path {source} must be absolute") - - LOG.debug( - "Copying file %s from instance %s to %s", source, instance_id, destination - ) - try: - run(["multipass", "transfer", f"{instance_id}:{source}", destination]) - except subprocess.CalledProcessError as e: - raise HarnessError("lxc file push command failed") from e - - def exec(self, instance_id: str, command: list, **kwargs): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - LOG.debug("Execute command %s in instance %s", command, instance_id) - return run( - [ - "multipass", - "exec", - instance_id, - "--", - "sudo", - "bash", - "-c", - shlex.join(command), - ], - **kwargs, - ) - - def delete_instance(self, instance_id: str): - if instance_id not in self.instances: - raise HarnessError(f"unknown instance {instance_id}") - - try: - run(["multipass", "delete", instance_id]) - run(["multipass", "purge"]) - except subprocess.CalledProcessError as e: - raise HarnessError(f"failed to delete instance {instance_id}") from e - - self.instances.discard(instance_id) - - def cleanup(self): - for instance_id in self.instances.copy(): - self.delete_instance(instance_id) diff --git a/tests/integration/test_util/util.py b/tests/integration/test_util/util.py deleted file mode 100644 index 2d766f3..0000000 --- a/tests/integration/test_util/util.py +++ /dev/null @@ -1,247 +0,0 @@ -# -# Copyright 2024 Canonical, Ltd. -# -import json -import logging -import shlex -import subprocess -from functools import partial -from pathlib import Path -from typing import Any, Callable, List, Optional, Union - -from tenacity import ( - RetryCallState, - retry, - retry_if_exception_type, - stop_after_attempt, - stop_never, - wait_fixed, -) -from test_util import config, harness - -LOG = logging.getLogger(__name__) - - -def run(command: list, **kwargs) -> subprocess.CompletedProcess: - """Log and run command.""" - kwargs.setdefault("check", True) - - LOG.debug("Execute command %s (kwargs=%s)", shlex.join(command), kwargs) - return subprocess.run(command, **kwargs) - - -def stubbornly( - retries: Optional[int] = None, - delay_s: Optional[Union[float, int]] = None, - exceptions: Optional[tuple] = None, - **retry_kds, -): - """ - Retry a command for a while, using tenacity - - By default, retry immediately and forever until no exceptions occur. - - Some commands need to execute until they pass some condition - > stubbornly(*retry_args).until(*some_condition).exec(*some_command) - - Some commands need to execute until they complete - > stubbornly(*retry_args).exec(*some_command) - - : param retries int: convenience param to use stop=retry.stop_after_attempt() - : param delay_s float|int: convenience param to use wait=retry.wait_fixed(delay_s) - : param exceptions Tuple[Exception]: convenience param to use retry=retry.retry_if_exception_type(exceptions) - : param retry_kds Mapping: direct interface to all tenacity arguments for retrying - """ - - def _before_sleep(retry_state: RetryCallState): - attempt = retry_state.attempt_number - tries = f"/{retries}" if retries is not None else "" - LOG.info( - f"Attempt {attempt}{tries} failed. 
Error: {retry_state.outcome.exception()}" - ) - LOG.info(f"Retrying in {delay_s} seconds...") - - _waits = wait_fixed(delay_s) if delay_s is not None else wait_fixed(0) - _stops = stop_after_attempt(retries) if retries is not None else stop_never - _exceptions = exceptions or (Exception,) # default to retry on all exceptions - - _retry_args = dict( - wait=_waits, - stop=_stops, - retry=retry_if_exception_type(_exceptions), - before_sleep=_before_sleep, - ) - # Permit any tenacity retry overrides from these ^defaults - _retry_args.update(retry_kds) - - class Retriable: - def __init__(self) -> None: - self._condition = None - self._run = partial(run, capture_output=True) - - @retry(**_retry_args) - def exec( - self, - command_args: List[str], - **command_kwds, - ): - """ - Execute a command against a harness or locally with subprocess to be retried. - - :param List[str] command_args: The command to be executed, as a str or list of str - :param Mapping[str,str] command_kwds: Additional keyword arguments to be passed to exec - """ - - try: - resp = self._run(command_args, **command_kwds) - except subprocess.CalledProcessError as e: - LOG.warning(f" rc={e.returncode}") - LOG.warning(f" stdout={e.stdout.decode()}") - LOG.warning(f" stderr={e.stderr.decode()}") - raise - if self._condition: - assert self._condition(resp), "Failed to meet condition" - return resp - - def on(self, instance: harness.Instance) -> "Retriable": - """ - Target the command at some instance. - - :param instance Instance: Instance on a test harness. - """ - self._run = partial(instance.exec, capture_output=True) - return self - - def until( - self, condition: Callable[[subprocess.CompletedProcess], bool] = None - ) -> "Retriable": - """ - Test the output of the executed command against an expected response - - :param Callable condition: a callable which returns a truth about the command output - """ - self._condition = condition - return self - - return Retriable() - - -# Installs and setups the k8s snap on the given instance and connects the interfaces. -def setup_k8s_snap(instance: harness.Instance): - LOG.info("Install k8s snap") - instance.exec( - ["snap", "install", "k8s", "--classic", "--channel", config.SNAP_CHANNEL] - ) - - -# Validates that the K8s node is in Ready state. -def wait_until_k8s_ready( - control_node: harness.Instance, instances: List[harness.Instance] -): - for instance in instances: - host = hostname(instance) - result = ( - stubbornly(retries=15, delay_s=5) - .on(control_node) - .until(lambda p: " Ready" in p.stdout.decode()) - .exec(["k8s", "kubectl", "get", "node", host, "--no-headers"]) - ) - LOG.info("Kubelet registered successfully!") - LOG.info("%s", result.stdout.decode()) - - -def wait_for_dns(instance: harness.Instance): - LOG.info("Waiting for DNS to be ready") - instance.exec(["k8s", "x-wait-for", "dns"]) - - -def wait_for_network(instance: harness.Instance): - LOG.info("Waiting for network to be ready") - instance.exec(["k8s", "x-wait-for", "network"]) - - -def hostname(instance: harness.Instance) -> str: - """Return the hostname for a given instance.""" - resp = instance.exec(["hostname"], capture_output=True) - return resp.stdout.decode().strip() - - -def get_local_node_status(instance: harness.Instance) -> str: - resp = instance.exec(["k8s", "local-node-status"], capture_output=True) - return resp.stdout.decode().strip() - - -def get_nodes(control_node: harness.Instance) -> List[Any]: - """Get a list of existing nodes. 
- - Args: - control_node: instance on which to execute check - - Returns: - list of nodes - """ - result = control_node.exec( - ["k8s", "kubectl", "get", "nodes", "-o", "json"], capture_output=True - ) - assert result.returncode == 0, "Failed to get nodes with kubectl" - node_list = json.loads(result.stdout.decode()) - assert node_list["kind"] == "List", "Should have found a list of nodes" - return [node for node in node_list["items"]] - - -def ready_nodes(control_node: harness.Instance) -> List[Any]: - """Get a list of the ready nodes. - - Args: - control_node: instance on which to execute check - - Returns: - list of nodes - """ - return [ - node - for node in get_nodes(control_node) - if all( - condition["status"] == "False" - for condition in node["status"]["conditions"] - if condition["type"] != "Ready" - ) - ] - - -# Create a token to join a node to an existing cluster -def get_join_token( - initial_node: harness.Instance, joining_cplane_node: harness.Instance, *args: str -) -> str: - out = initial_node.exec( - ["k8s", "get-join-token", joining_cplane_node.id, *args], - capture_output=True, - ) - return out.stdout.decode().strip() - - -# Join an existing cluster. -def join_cluster(instance: harness.Instance, join_token: str): - instance.exec(["k8s", "join-cluster", join_token]) - - -def get_default_cidr(instance: harness.Instance, instance_default_ip: str): - # ---- - # 1: lo inet 127.0.0.1/8 scope host lo ..... - # 28: eth0 inet 10.42.254.197/24 metric 100 brd 10.42.254.255 scope global dynamic eth0 .... - # ---- - # Fetching the cidr for the default interface by matching with instance ip from the output - p = instance.exec(["ip", "-o", "-f", "inet", "addr", "show"], capture_output=True) - out = p.stdout.decode().split(" ") - return [i for i in out if instance_default_ip in i][0] - - -def get_default_ip(instance: harness.Instance): - # --- - # default via 10.42.254.1 dev eth0 proto dhcp src 10.42.254.197 metric 100 - # --- - # Fetching the default IP address from the output, e.g. 
10.42.254.197 - p = instance.exec( - ["ip", "-o", "-4", "route", "show", "to", "default"], capture_output=True - ) - return p.stdout.decode().split(" ")[8] diff --git a/tests/lxd-profile.yaml b/tests/lxd-profile.yaml deleted file mode 100644 index c6a05f3..0000000 --- a/tests/lxd-profile.yaml +++ /dev/null @@ -1,105 +0,0 @@ -description: "LXD profile for Canonical Kubernetes" -config: - linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,iptable_raw,netlink_diag,nf_nat,overlay,br_netfilter,xt_socket - raw.lxc: | - lxc.apparmor.profile=unconfined - lxc.mount.auto=proc:rw sys:rw cgroup:rw - lxc.cgroup.devices.allow=a - lxc.cap.drop= - security.nesting: "true" - security.privileged: "true" -devices: - aadisable2: - path: /dev/kmsg - source: /dev/kmsg - type: unix-char - dev-loop-control: - major: "10" - minor: "237" - path: /dev/loop-control - type: unix-char - dev-loop0: - major: "7" - minor: "0" - path: /dev/loop0 - type: unix-block - dev-loop1: - major: "7" - minor: "1" - path: /dev/loop1 - type: unix-block - dev-loop2: - major: "7" - minor: "2" - path: /dev/loop2 - type: unix-block - dev-loop3: - major: "7" - minor: "3" - path: /dev/loop3 - type: unix-block - dev-loop4: - major: "7" - minor: "4" - path: /dev/loop4 - type: unix-block - dev-loop5: - major: "7" - minor: "5" - path: /dev/loop5 - type: unix-block - dev-loop6: - major: "7" - minor: "6" - path: /dev/loop6 - type: unix-block - dev-loop7: - major: "7" - minor: "7" - path: /dev/loop7 - type: unix-block - dev-loop8: - major: "7" - minor: "8" - path: /dev/loop8 - type: unix-block - dev-loop9: - major: "7" - minor: "9" - path: /dev/loop9 - type: unix-block - dev-loop10: - major: "7" - minor: "10" - path: /dev/loop10 - type: unix-block - dev-loop11: - major: "7" - minor: "11" - path: /dev/loop11 - type: unix-block - dev-loop12: - major: "7" - minor: "12" - path: /dev/loop12 - type: unix-block - dev-loop13: - major: "7" - minor: "13" - path: /dev/loop13 - type: unix-block - dev-loop14: - major: "7" - minor: "14" - path: /dev/loop14 - type: unix-block - dev-loop15: - major: "7" - minor: "15" - path: /dev/loop15 - type: unix-block - dev-loop16: - major: "7" - minor: "16" - path: /dev/loop16 - type: unix-block diff --git a/tests/requirements-test.txt b/tests/requirements-test.txt index e267307..ff57d6b 100644 --- a/tests/requirements-test.txt +++ b/tests/requirements-test.txt @@ -2,3 +2,4 @@ coverage[toml]==7.2.5 pytest==7.3.1 PyYAML==6.0.1 tenacity==8.2.3 +git+https://github.com/canonical/k8s-test-harness.git@main diff --git a/tests/sanity/test_acmesolver.py b/tests/sanity/test_acmesolver.py index 848f25a..91efca2 100644 --- a/tests/sanity/test_acmesolver.py +++ b/tests/sanity/test_acmesolver.py @@ -1,14 +1,25 @@ -import subprocess -import os - - -def test_sanity_acmesolver(): - image = os.getenv("ROCK_CERT_MANAGER_ACMESOLVER") - assert image is not None, "ROCK_CERT_MANAGER_ACMESOLVER is not set" - docker_run = subprocess.run( - ["docker", "run", "--rm", "--entrypoint", "/acmesolver-linux", image, "--help"], - capture_output=True, - check=True, - text=True, +# +# Copyright 2024 Canonical, Ltd. +# See LICENSE file for licensing details +# + +import pytest + +from k8s_test_harness.util import docker_util, env_util + +IMG_NAME = "cert-manager-acmesolver" +IMG_PLATFORM = "amd64" + +EXP_HELPSTR = "HTTP server used to solve ACME challenges." 
+
+
+@pytest.mark.parametrize('version', ("1.10.1", "1.12.2"))
+def test_sanity_acmesolver(version):
+    rock = env_util.get_build_meta_info_for_rock_version(
+        IMG_NAME, version, IMG_PLATFORM
+    )
+
+    docker_run = docker_util.run_in_docker(
+        rock.image, ["/acmesolver-linux", "--help"]
     )
-    assert "HTTP server used to solve ACME challenges." in docker_run.stdout
+    assert EXP_HELPSTR in docker_run.stderr
diff --git a/tests/sanity/test_cainjector.py b/tests/sanity/test_cainjector.py
index 908b0bd..5099b4b 100644
--- a/tests/sanity/test_cainjector.py
+++ b/tests/sanity/test_cainjector.py
@@ -1,17 +1,25 @@
-import subprocess
-import os
+#
+# Copyright 2024 Canonical, Ltd.
+# See LICENSE file for licensing details
+#
 
+import pytest
 
-def test_sanity_cainjector():
-    image = os.getenv("ROCK_CERT_MANAGER_CAINJECTOR")
-    assert image is not None, "ROCK_CERT_MANAGER_CAINJECTOR is not set"
-    docker_run = subprocess.run(
-        ["docker", "run", "--rm", "--entrypoint", "/cainjector-linux", image, "--help"],
-        capture_output=True,
-        check=True,
-        text=True,
+from k8s_test_harness.util import docker_util, env_util
+
+IMG_NAME = "cert-manager-cainjector"
+IMG_PLATFORM = "amd64"
+
+EXP_HELPSTR = "cert-manager CA injector is a Kubernetes addon to automate the injection of CA data into"
+
+
+@pytest.mark.parametrize('version', ("1.10.1", "1.12.2"))
+def test_sanity_cainjector(version):
+    rock = env_util.get_build_meta_info_for_rock_version(
+        IMG_NAME, version, IMG_PLATFORM
     )
-    assert (
-        "cert-manager CA injector is a Kubernetes addon to automate the injection of CA data into"
-        in docker_run.stdout
+
+    docker_run = docker_util.run_in_docker(
+        rock.image, ["/cainjector-linux", "--help"]
     )
+    assert EXP_HELPSTR in docker_run.stderr
diff --git a/tests/sanity/test_controller.py b/tests/sanity/test_controller.py
index c22e68d..18ec7a8 100644
--- a/tests/sanity/test_controller.py
+++ b/tests/sanity/test_controller.py
@@ -1,17 +1,20 @@
-import subprocess
-import os
+import pytest
 
+from k8s_test_harness.util import docker_util, env_util
 
-def test_sanity_controller():
-    image = os.getenv("ROCK_CERT_MANAGER_CONTROLLER")
-    assert image is not None, "ROCK_CERT_MANAGER_CONTROLLER is not set"
-    docker_run = subprocess.run(
-        ["docker", "run", "--rm", "--entrypoint", "/controller-linux", image, "--help"],
-        capture_output=True,
-        check=True,
-        text=True,
+IMG_NAME = "cert-manager-controller"
+IMG_PLATFORM = "amd64"
+
+EXP_HELPSTR = "cert-manager is a Kubernetes addon to automate the management and issuance"
+
+
+@pytest.mark.parametrize('version', ("1.10.1", "1.12.2"))
+def test_sanity_controller(version):
+    rock = env_util.get_build_meta_info_for_rock_version(
+        IMG_NAME, version, IMG_PLATFORM
     )
-    assert (
-        "cert-manager is a Kubernetes addon to automate the management and issuance"
-        in docker_run.stdout
+
+    docker_run = docker_util.run_in_docker(
+        rock.image, ["/controller-linux", "--help"]
     )
+    assert EXP_HELPSTR in docker_run.stderr
diff --git a/tests/sanity/test_webhook.py b/tests/sanity/test_webhook.py
index ad42c2b..a892544 100644
--- a/tests/sanity/test_webhook.py
+++ b/tests/sanity/test_webhook.py
@@ -1,14 +1,20 @@
-import subprocess
-import os
-
-
-def test_sanity_webhook():
-    image = os.getenv("ROCK_CERT_MANAGER_WEBHOOK")
-    assert image is not None, "ROCK_CERT_MANAGER_WEBHOOK is not set"
-    docker_run = subprocess.run(
-        ["docker", "run", "--rm", "--entrypoint", "/webhook-linux", image, "--help"],
-        capture_output=True,
-        check=True,
-        text=True,
+import pytest
+
+from k8s_test_harness.util import docker_util, env_util
+
+IMG_NAME = "cert-manager-webhook"
+IMG_PLATFORM = "amd64"
+
+EXP_HELPSTR = "Webhook component providing API validation"
+
+
+@pytest.mark.parametrize('version', ("1.10.1", "1.12.2"))
+def test_sanity_webhook(version):
+    rock = env_util.get_build_meta_info_for_rock_version(
+        IMG_NAME, version, IMG_PLATFORM
+    )
+
+    docker_run = docker_util.run_in_docker(
+        rock.image, ["/webhook-linux", "--help"]
     )
-    assert "Webhook component providing API validation" in docker_run.stdout
+    assert EXP_HELPSTR in docker_run.stderr
diff --git a/tests/templates/bootstrap-session.yaml b/tests/templates/bootstrap-session.yaml
deleted file mode 100644
index 6066e63..0000000
--- a/tests/templates/bootstrap-session.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Contains the bootstrap configuration for the session instance of the integration tests.
-# The session instance persists over test runs and is used to speed-up the integration tests.
-cluster-config:
-  network:
-    enabled: true
-  dns:
-    enabled: true
diff --git a/tests/tox.ini b/tests/tox.ini
index 9b03827..09d625d 100644
--- a/tests/tox.ini
+++ b/tests/tox.ini
@@ -1,7 +1,7 @@
 [tox]
 no_package = True
 skip_missing_interpreters = True
-env_list = format, lint, integration
+env_list = format, lint, integration, sanity
 min_version = 4.0.0
 
 [testenv]
@@ -15,19 +15,29 @@ pass_env =
 description = Apply coding style standards to code
 deps = -r {tox_root}/requirements-dev.txt
 commands =
-    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/tests
-    isort {tox_root}/tests --profile=black
-    black {tox_root}/tests
+    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/sanity
+    isort {tox_root}/sanity --profile=black
+    black {tox_root}/sanity
+
+    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/integration
+    isort {tox_root}/integration --profile=black
+    black {tox_root}/integration
 
 [testenv:lint]
 description = Check code against coding style standards
 deps = -r {tox_root}/requirements-dev.txt
 commands =
-    codespell {tox_root}/tests
-    flake8 {tox_root}/tests
-    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/tests --dry
-    isort {tox_root}/tests --profile=black --check
-    black {tox_root}/tests --check --diff
+    codespell {tox_root}/sanity
+    flake8 {tox_root}/sanity
+    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/sanity --dry
+    isort {tox_root}/sanity --profile=black --check
+    black {tox_root}/sanity --check --diff
+
+    codespell {tox_root}/integration
+    flake8 {tox_root}/integration
+    licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/integration --dry
+    isort {tox_root}/integration --profile=black --check
+    black {tox_root}/integration --check --diff
 
 [testenv:sanity]
 description = Run integration tests
@@ -44,6 +54,7 @@ commands =
 pass_env =
     TEST_*
    ROCK_*
+    BUILT_ROCKS_METADATA
 
 [testenv:integration]
 description = Run integration tests
@@ -60,10 +71,12 @@ commands =
 pass_env =
     TEST_*
    ROCK_*
+    BUILT_ROCKS_METADATA
 
 [flake8]
 max-line-length = 120
 select = E,W,F,C,N
-ignore = W503
+# E231 rule is not aware of f-strings
+ignore = W503,E231
 exclude = venv,.git,.tox,.tox_env,.venv,build,dist,*.egg_info
 show-source = true
diff --git a/webhook/1.10.1/rockcraft.yaml b/webhook/1.10.1/rockcraft.yaml
new file mode 100644
index 0000000..b314fb7
--- /dev/null
+++ b/webhook/1.10.1/rockcraft.yaml
@@ -0,0 +1,39 @@
+name: cert-manager-webhook
+summary: ROCK for the cert-manager-webhook Project.
+description: | + This ROCK is a drop-in replacement for the autoscaling/cert-manager-webhook image. +version: "1.10.1" +license: Apache-2.0 + +base: bare +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +run-user: _daemon_ +entrypoint-service: cert-manager-webhook +services: + cert-manager-webhook: + override: replace + summary: "cert-manager-webhook service" + startup: enabled + command: "/webhook-linux [ -h ]" + on-failure: shutdown + +parts: + cert-manager-webhook: + plugin: nil + source: https://github.com/cert-manager/cert-manager.git + source-type: git + source-tag: v1.10.1 + source-depth: 1 + build-snaps: + - jq + - go/1.22/stable + override-build: | + # CTR=echo is hacky way of passing docker check not required for build + make CTR=echo _bin/server/webhook-linux-${CRAFT_PLATFORM} + cp _bin/server/webhook-linux-${CRAFT_PLATFORM} ${CRAFT_PART_INSTALL}/webhook-linux + prime: + - webhook-linux diff --git a/webhook/rockcraft.yaml b/webhook/1.12.2/rockcraft.yaml similarity index 100% rename from webhook/rockcraft.yaml rename to webhook/1.12.2/rockcraft.yaml