diff --git a/.github/.jira_sync_config.yaml b/.github/.jira_sync_config.yaml new file mode 100644 index 00000000..f2fdc0e6 --- /dev/null +++ b/.github/.jira_sync_config.yaml @@ -0,0 +1,16 @@ +settings: + jira_project_key: "SMS" + status_mapping: + opened: Untriaged + closed: done + not_planned: rejected + + components: + - traefik + + add_gh_comment: false + sync_description: false + sync_comments: false + + label_mapping: + "Type: Enhancement": Story diff --git a/.github/workflows/issues.yaml b/.github/workflows/issues.yaml deleted file mode 100644 index dbd9869d..00000000 --- a/.github/workflows/issues.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: Issues - -on: [issues] - -jobs: - update: - name: Update Issue - uses: canonical/observability/.github/workflows/issues.yaml@main - secrets: inherit - with: - component: traefik diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml index 1080b5fd..bda1847b 100644 --- a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -10,3 +10,7 @@ jobs: name: PR uses: canonical/observability/.github/workflows/charm-pull-request.yaml@main secrets: inherit + + terraform-checks: + name: Terraform + uses: canonical/observability/.github/workflows/terraform-quality-checks.yaml@main \ No newline at end of file diff --git a/.gitignore b/.gitignore index 10bc6a0e..7dcedfe3 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,40 @@ tests/integration/testers/*/lib # charmcraft bug: leaves the build folders behind after a pack. **/parts/ **/prime/ -**/stage/ \ No newline at end of file +**/stage/ + +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# password, private keys, and other secrets. 
These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. +*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc +.terraform.lock.hcl \ No newline at end of file diff --git a/config.yaml b/config.yaml index 1e643be6..545e607b 100644 --- a/config.yaml +++ b/config.yaml @@ -51,6 +51,9 @@ options: will provide to the unit `my-unit/2` in the `my-model` model the following URL: `http://my-model-my-unit-2.foo:8080` + + Note that, for 'subdomain' routing mode, the external_hostname must be set and not be set to an IP address. This + is because subdomains are not supported for IP addresses. type: string default: path diff --git a/lib/charms/observability_libs/v0/juju_topology.py b/lib/charms/observability_libs/v0/juju_topology.py deleted file mode 100644 index a79e5d43..00000000 --- a/lib/charms/observability_libs/v0/juju_topology.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. -"""## Overview. - -This document explains how to use the `JujuTopology` class to -create and consume topology information from Juju in a consistent manner. - -The goal of the Juju topology is to uniquely identify a piece -of software running across any of your Juju-managed deployments. 
-This is achieved by combining the following four elements: - -- Model name -- Model UUID -- Application name -- Unit identifier - - -For a more in-depth description of the concept, as well as a -walk-through of it's use-case in observability, see -[this blog post](https://juju.is/blog/model-driven-observability-part-2-juju-topology-metrics) -on the Juju blog. - -## Library Usage - -This library may be used to create and consume `JujuTopology` objects. -The `JujuTopology` class provides three ways to create instances: - -### Using the `from_charm` method - -Enables instantiation by supplying the charm as an argument. When -creating topology objects for the current charm, this is the recommended -approach. - -```python -topology = JujuTopology.from_charm(self) -``` - -### Using the `from_dict` method - -Allows for instantion using a dictionary of relation data, like the -`scrape_metadata` from Prometheus or the labels of an alert rule. When -creating topology objects for remote charms, this is the recommended -approach. - -```python -scrape_metadata = json.loads(relation.data[relation.app].get("scrape_metadata", "{}")) -topology = JujuTopology.from_dict(scrape_metadata) -``` - -### Using the class constructor - -Enables instantiation using whatever values you want. While this -is useful in some very specific cases, this is almost certainly not -what you are looking for as setting these values manually may -result in observability metrics which do not uniquely identify a -charm in order to provide accurate usage reporting, alerting, -horizontal scaling, or other use cases. 
- -```python -topology = JujuTopology( - model="some-juju-model", - model_uuid="00000000-0000-0000-0000-000000000001", - application="fancy-juju-application", - unit="fancy-juju-application/0", - charm_name="fancy-juju-application-k8s", -) -``` - -""" -from collections import OrderedDict -from typing import Dict, List, Optional -from uuid import UUID - -# The unique Charmhub library identifier, never change it -LIBID = "bced1658f20f49d28b88f61f83c2d232" - -LIBAPI = 0 -LIBPATCH = 6 - - -class InvalidUUIDError(Exception): - """Invalid UUID was provided.""" - - def __init__(self, uuid: str): - self.message = "'{}' is not a valid UUID.".format(uuid) - super().__init__(self.message) - - -class JujuTopology: - """JujuTopology is used for storing, generating and formatting juju topology information. - - DEPRECATED: This class is deprecated. Use `pip install cosl` and - `from cosl.juju_topology import JujuTopology` instead. - """ - - def __init__( - self, - model: str, - model_uuid: str, - application: str, - unit: Optional[str] = None, - charm_name: Optional[str] = None, - ): - """Build a JujuTopology object. - - A `JujuTopology` object is used for storing and transforming - Juju topology information. This information is used to - annotate Prometheus scrape jobs and alert rules. Such - annotation when applied to scrape jobs helps in identifying - the source of the scrapped metrics. On the other hand when - applied to alert rules topology information ensures that - evaluation of alert expressions is restricted to the source - (charm) from which the alert rules were obtained. 
- - Args: - model: a string name of the Juju model - model_uuid: a globally unique string identifier for the Juju model - application: an application name as a string - unit: a unit name as a string - charm_name: name of charm as a string - """ - if not self.is_valid_uuid(model_uuid): - raise InvalidUUIDError(model_uuid) - - self._model = model - self._model_uuid = model_uuid - self._application = application - self._charm_name = charm_name - self._unit = unit - - def is_valid_uuid(self, uuid): - """Validate the supplied UUID against the Juju Model UUID pattern. - - Args: - uuid: string that needs to be checked if it is valid v4 UUID. - - Returns: - True if parameter is a valid v4 UUID, False otherwise. - """ - try: - return str(UUID(uuid, version=4)) == uuid - except (ValueError, TypeError): - return False - - @classmethod - def from_charm(cls, charm): - """Creates a JujuTopology instance by using the model data available on a charm object. - - Args: - charm: a `CharmBase` object for which the `JujuTopology` will be constructed - Returns: - a `JujuTopology` object. - """ - return cls( - model=charm.model.name, - model_uuid=charm.model.uuid, - application=charm.model.app.name, - unit=charm.model.unit.name, - charm_name=charm.meta.name, - ) - - @classmethod - def from_dict(cls, data: dict): - """Factory method for creating `JujuTopology` children from a dictionary. - - Args: - data: a dictionary with five keys providing topology information. The keys are - - "model" - - "model_uuid" - - "application" - - "unit" - - "charm_name" - `unit` and `charm_name` may be empty, but will result in more limited - labels. However, this allows us to support charms without workloads. - - Returns: - a `JujuTopology` object. 
- """ - return cls( - model=data["model"], - model_uuid=data["model_uuid"], - application=data["application"], - unit=data.get("unit", ""), - charm_name=data.get("charm_name", ""), - ) - - def as_dict( - self, - *, - remapped_keys: Optional[Dict[str, str]] = None, - excluded_keys: Optional[List[str]] = None, - ) -> OrderedDict: - """Format the topology information into an ordered dict. - - Keeping the dictionary ordered is important to be able to - compare dicts without having to resort to deep comparisons. - - Args: - remapped_keys: A dictionary mapping old key names to new key names, - which will be substituted when invoked. - excluded_keys: A list of key names to exclude from the returned dict. - uuid_length: The length to crop the UUID to. - """ - ret = OrderedDict( - [ - ("model", self.model), - ("model_uuid", self.model_uuid), - ("application", self.application), - ("unit", self.unit), - ("charm_name", self.charm_name), - ] - ) - if excluded_keys: - ret = OrderedDict({k: v for k, v in ret.items() if k not in excluded_keys}) - - if remapped_keys: - ret = OrderedDict( - (remapped_keys.get(k), v) if remapped_keys.get(k) else (k, v) for k, v in ret.items() # type: ignore - ) - - return ret - - @property - def identifier(self) -> str: - """Format the topology information into a terse string. - - This crops the model UUID, making it unsuitable for comparisons against - anything but other identifiers. Mainly to be used as a display name or file - name where long strings might become an issue. 
- - >>> JujuTopology( \ - model = "a-model", \ - model_uuid = "00000000-0000-4000-8000-000000000000", \ - application = "some-app", \ - unit = "some-app/1" \ - ).identifier - 'a-model_00000000_some-app' - """ - parts = self.as_dict( - excluded_keys=["unit", "charm_name"], - ) - - parts["model_uuid"] = self.model_uuid_short - values = parts.values() - - return "_".join([str(val) for val in values]).replace("/", "_") - - @property - def label_matcher_dict(self) -> Dict[str, str]: - """Format the topology information into a dict with keys having 'juju_' as prefix. - - Relabelled topology never includes the unit as it would then only match - the leader unit (ie. the unit that produced the dict). - """ - items = self.as_dict( - remapped_keys={"charm_name": "charm"}, - excluded_keys=["unit"], - ).items() - - return {"juju_{}".format(key): value for key, value in items if value} - - @property - def label_matchers(self) -> str: - """Format the topology information into a promql/logql label matcher string. - - Topology label matchers should never include the unit as it - would then only match the leader unit (ie. the unit that - produced the matchers). 
- """ - items = self.label_matcher_dict.items() - return ", ".join(['{}="{}"'.format(key, value) for key, value in items if value]) - - @property - def model(self) -> str: - """Getter for the juju model value.""" - return self._model - - @property - def model_uuid(self) -> str: - """Getter for the juju model uuid value.""" - return self._model_uuid - - @property - def model_uuid_short(self) -> str: - """Getter for the juju model value, truncated to the first eight letters.""" - return self._model_uuid[:8] - - @property - def application(self) -> str: - """Getter for the juju application value.""" - return self._application - - @property - def charm_name(self) -> Optional[str]: - """Getter for the juju charm name value.""" - return self._charm_name - - @property - def unit(self) -> Optional[str]: - """Getter for the juju unit value.""" - return self._unit diff --git a/lib/charms/tempo_k8s/v1/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py similarity index 96% rename from lib/charms/tempo_k8s/v1/charm_tracing.py rename to lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index 2dbdddd6..2604c39e 100644 --- a/lib/charms/tempo_k8s/v1/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -12,15 +12,15 @@ # Quickstart Fetch the following charm libs (and ensure the minimum version/revision numbers are satisfied): - charmcraft fetch-lib charms.tempo_k8s.v2.tracing # >= 1.10 - charmcraft fetch-lib charms.tempo_k8s.v1.charm_tracing # >= 2.7 + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing # >= 1.10 + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing # >= 2.7 Then edit your charm code to include: ```python # import the necessary charm libs -from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer, charm_tracing_config -from charms.tempo_k8s.v1.charm_tracing import charm_tracing +from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config +from 
charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing # decorate your charm class with charm_tracing: @charm_tracing( @@ -51,7 +51,7 @@ def __init__(self, ...): 2) add to your charm a "my_tracing_endpoint" (you can name this attribute whatever you like) **property**, **method** or **instance attribute** that returns an otlp http/https endpoint url. -If you are using the ``charms.tempo_k8s.v2.tracing.TracingEndpointRequirer`` as +If you are using the ``charms.tempo_coordinator_k8s.v0.tracing.TracingEndpointRequirer`` as ``self.tracing = TracingEndpointRequirer(self)``, the implementation could be: ``` @@ -80,7 +80,7 @@ def my_tracing_endpoint(self) -> Optional[str]: For example: ``` -from charms.tempo_k8s.v1.charm_tracing import trace_charm +from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm @trace_charm( tracing_endpoint="my_tracing_endpoint", server_cert="_server_cert" @@ -129,7 +129,7 @@ def get_tracer(self) -> opentelemetry.trace.Tracer: For example: ``` - from charms.tempo_k8s.v0.charm_tracing import trace_charm + from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm @trace_charm( tracing_endpoint="my_tracing_endpoint", @@ -150,7 +150,7 @@ def my_tracing_endpoint(self) -> Optional[str]: needs to be replaced with: ``` - from charms.tempo_k8s.v1.charm_tracing import trace_charm + from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm @trace_charm( tracing_endpoint="my_tracing_endpoint", @@ -249,28 +249,27 @@ def _remove_stale_otel_sdk_packages(): from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import Span, TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.trace import INVALID_SPAN, Tracer +from opentelemetry.trace import get_current_span as otlp_get_current_span from opentelemetry.trace import ( - INVALID_SPAN, - Tracer, get_tracer, get_tracer_provider, set_span_in_context, set_tracer_provider, ) -from opentelemetry.trace 
import get_current_span as otlp_get_current_span from ops.charm import CharmBase from ops.framework import Framework # The unique Charmhub library identifier, never change it -LIBID = "cb1705dcd1a14ca09b2e60187d1215c7" +LIBID = "01780f1e588c42c3976d26780fdf9b89" # Increment this major API version when introducing breaking changes -LIBAPI = 1 +LIBAPI = 0 # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 15 +LIBPATCH = 1 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] @@ -332,7 +331,7 @@ def _get_tracer() -> Optional[Tracer]: return tracer.get() except LookupError: # fallback: this course-corrects for a user error where charm_tracing symbols are imported - # from different paths (typically charms.tempo_k8s... and lib.charms.tempo_k8s...) + # from different paths (typically charms.tempo_coordinator_k8s... and lib.charms.tempo_coordinator_k8s...) try: ctx: Context = copy_context() if context_tracer := _get_tracer_from_context(ctx): @@ -562,8 +561,8 @@ def trace_charm( method calls on instances of this class. 
Usage: - >>> from charms.tempo_k8s.v1.charm_tracing import trace_charm - >>> from charms.tempo_k8s.v1.tracing import TracingEndpointRequirer + >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm + >>> from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer >>> from ops import CharmBase >>> >>> @trace_charm( @@ -626,7 +625,7 @@ def _autoinstrument( Usage: - >>> from charms.tempo_k8s.v1.charm_tracing import _autoinstrument + >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import _autoinstrument >>> from ops.main import main >>> _autoinstrument( >>> MyCharm, diff --git a/lib/charms/tempo_k8s/v2/tracing.py b/lib/charms/tempo_coordinator_k8s/v0/tracing.py similarity index 98% rename from lib/charms/tempo_k8s/v2/tracing.py rename to lib/charms/tempo_coordinator_k8s/v0/tracing.py index 81bf1f1f..4af379a5 100644 --- a/lib/charms/tempo_k8s/v2/tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/tracing.py @@ -16,7 +16,7 @@ This relation must use the `tracing` interface. 
The `TracingEndpointRequirer` object may be instantiated as follows - from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer + from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer def __init__(self, *args): super().__init__(*args) @@ -58,7 +58,7 @@ def __init__(self, *args): For example a Tempo charm may instantiate the `TracingEndpointProvider` in its constructor as follows - from charms.tempo_k8s.v2.tracing import TracingEndpointProvider + from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointProvider def __init__(self, *args): super().__init__(*args) @@ -100,14 +100,14 @@ def __init__(self, *args): from pydantic import BaseModel, Field # The unique Charmhub library identifier, never change it -LIBID = "12977e9aa0b34367903d8afeb8c3d85d" +LIBID = "d2f02b1f8d1244b5989fd55bc3a28943" # Increment this major API version when introducing breaking changes -LIBAPI = 2 +LIBAPI = 0 # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 10 +LIBPATCH = 1 PYDEPS = ["pydantic"] @@ -947,8 +947,8 @@ def charm_tracing_config( Usage: If you are using charm_tracing >= v1.9: - >>> from lib.charms.tempo_k8s.v1.charm_tracing import trace_charm - >>> from lib.charms.tempo_k8s.v2.tracing import charm_tracing_config + >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm + >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") >>> class MyCharm(...): >>> _cert_path = "/path/to/cert/on/charm/container.crt" @@ -958,8 +958,8 @@ def charm_tracing_config( ... 
self.tracing, self._cert_path) If you are using charm_tracing < v1.9: - >>> from lib.charms.tempo_k8s.v1.charm_tracing import trace_charm - >>> from lib.charms.tempo_k8s.v2.tracing import charm_tracing_config + >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm + >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") >>> class MyCharm(...): >>> _cert_path = "/path/to/cert/on/charm/container.crt" diff --git a/lib/charms/traefik_k8s/v2/ingress.py b/lib/charms/traefik_k8s/v2/ingress.py index bb7ac5ed..4c7c12ff 100644 --- a/lib/charms/traefik_k8s/v2/ingress.py +++ b/lib/charms/traefik_k8s/v2/ingress.py @@ -57,7 +57,18 @@ def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): import typing from dataclasses import dataclass from functools import partial -from typing import Any, Callable, Dict, List, MutableMapping, Optional, Sequence, Tuple, Union +from typing import ( + Any, + Callable, + Dict, + List, + MutableMapping, + Optional, + Sequence, + Tuple, + Union, + cast, +) import pydantic from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent @@ -226,7 +237,7 @@ class IngressUrl(BaseModel): class IngressProviderAppData(DatabagModel): """Ingress application databag schema.""" - ingress: IngressUrl + ingress: Optional[IngressUrl] = None class ProviderSchema(BaseModel): @@ -558,7 +569,16 @@ def _published_url(self, relation: Relation) -> Optional["IngressProviderAppData def publish_url(self, relation: Relation, url: str): """Publish to the app databag the ingress url.""" ingress_url = {"url": url} - IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app]) # type: ignore + try: + IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app]) # type: ignore + except pydantic.ValidationError as e: + # If we cannot validate the url as valid, publish an empty databag and log the error. 
+            log.error(f"Failed to validate ingress url '{url}' - got ValidationError {e}")
+            log.error(
+                f"url was not published to ingress relation for {relation.app}. This error is likely due to an"
+                " error or misconfiguration of the charm calling this library."
+            )
+            IngressProviderAppData(ingress=None).dump(relation.data[self.app])  # type: ignore
 
     @property
     def proxied_endpoints(self) -> Dict[str, Dict[str, str]]:
@@ -596,10 +616,14 @@ def proxied_endpoints(self) -> Dict[str, Dict[str, str]]:
             if not ingress_data:
                 log.warning(f"relation {ingress_relation} not ready yet: try again in some time.")
                 continue
+
+            # Validation above means ingress cannot be None, but type checker doesn't know that.
+            ingress = ingress_data.ingress
+            ingress = cast(IngressUrl, ingress)
             if PYDANTIC_IS_V1:
-                results[ingress_relation.app.name] = ingress_data.ingress.dict()
+                results[ingress_relation.app.name] = ingress.dict()
             else:
-                results[ingress_relation.app.name] = ingress_data.ingress.model_dump(mode="json")
+                results[ingress_relation.app.name] = ingress.model_dump(mode="json")
 
         return results
 
@@ -834,7 +858,11 @@ def _get_url_from_relation_data(self) -> Optional[str]:
         if not databag:
             # not ready yet
             return None
-        return str(IngressProviderAppData.load(databag).ingress.url)
+        ingress = IngressProviderAppData.load(databag).ingress
+        if ingress is None:
+            return None
+
+        return str(ingress.url)
 
     @property
     def url(self) -> Optional[str]:
diff --git a/metadata.yaml b/metadata.yaml
index e3f8e3a0..1e8b3c80 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -84,9 +84,14 @@ requires:
     interface: loki_push_api
     description: |
       Receives Loki's push api endpoint address to push logs to, and forwards charm's built-in alert rules to Loki.
-  tracing:
+  charm-tracing:
     description: |
-      Tracing endpoint for integrating with tempo.
+      Enables sending charm traces to a distributed tracing backend such as Tempo.
+ limit: 1 + interface: tracing + workload-tracing: + description: | + Enables sending workload traces to a distributed tracing backend such as Tempo. limit: 1 interface: tracing receive-ca-cert: diff --git a/src/charm.py b/src/charm.py index 99e86a75..011e9c4e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,6 +9,7 @@ import itertools import json import logging +import re import socket from typing import Any, Dict, List, Optional, Tuple, Union, cast from urllib.parse import urlparse @@ -44,8 +45,8 @@ from charms.observability_libs.v1.cert_handler import CertHandler from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider -from charms.tempo_k8s.v1.charm_tracing import trace_charm -from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer +from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm +from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config from charms.traefik_k8s.v0.traefik_route import ( TraefikRouteProvider, TraefikRouteRequirerReadyEvent, @@ -56,6 +57,7 @@ IngressPerUnitProvider, ) from charms.traefik_k8s.v2.ingress import IngressPerAppProvider as IPAv2 +from cosl import JujuTopology from deepmerge import always_merger from lightkube.core.client import Client from lightkube.core.exceptions import ApiError @@ -135,6 +137,20 @@ class TraefikIngressCharm(CharmBase): def __init__(self, *args): super().__init__(*args) + # Before doing anything, validate the charm config. If configuration is invalid, warn the user. + # FIXME: Invalid configuration here SHOULD halt the charm's operation until resolution, or at least make a + # persistent BlockedStatus, but this charm handles events atomically rather than holistically. 
+ # This means that skipping events will result in unexpected issues, so if we halt the charm here we must + # ensure the charm processes all backlogged events on resumption in the original order. Rather than do that + # and risk losing an event, we simply warn the user and continue functioning as best as possible. The charm + # will operate correctly except that it will not publish ingress urls to related applications, instead + # leaving the ingress relation data empty and logging an error. + # If we refactor this charm to handle events holistically (and thus we can skip events without issue), we + # should refactor this validation truly halt the charm. + # If we refactor this charm to use collect_unit_status, we could raise a persistent BlockedStatus message when + # this configuration is invalid. + self._validate_config() + self._stored.set_default( config_hash=None, ) @@ -174,6 +190,20 @@ def __init__(self, *args): charm=self, external_host=self._external_host, scheme=self._scheme # type: ignore ) + self._topology = JujuTopology.from_charm(self) + + # tracing integration + self._charm_tracing = TracingEndpointRequirer( + self, relation_name="charm-tracing", protocols=["otlp_http"] + ) + self._workload_tracing = TracingEndpointRequirer( + self, relation_name="workload-tracing", protocols=["jaeger_thrift_http"] + ) + + self.charm_tracing_endpoint, self.server_cert = charm_tracing_config( + self._charm_tracing, SERVER_CERT_PATH + ) + self.traefik = Traefik( container=self.container, routing_mode=self._routing_mode, @@ -182,6 +212,12 @@ def __init__(self, *args): experimental_forward_auth_enabled=self._is_forward_auth_enabled, traefik_route_static_configs=self._traefik_route_static_configs(), basic_auth_user=self._basic_auth_user, + topology=self._topology, + tracing_endpoint=( + self._workload_tracing.get_endpoint("jaeger_thrift_http") + if self._is_workload_tracing_ready() + else None + ), ) self.service_patch = KubernetesServicePatch( @@ -201,8 +237,6 @@ def 
__init__(self, *args): ], ) # Observability integrations - # tracing integration - self._tracing = TracingEndpointRequirer(self, protocols=["otlp_http"]) # Provide grafana dashboards over a relation interface # dashboard to use: https://grafana.com/grafana/dashboards/4475-traefik/ @@ -231,12 +265,12 @@ def __init__(self, *args): # TODO update init params once auto-renew is implemented # https://github.com/canonical/tls-certificates-interface/issues/24 observe( - self._tracing.on.endpoint_changed, # type: ignore - self._on_tracing_endpoint_changed, + self._workload_tracing.on.endpoint_changed, # type: ignore + self._on_workload_tracing_endpoint_changed, ) observe( - self._tracing.on.endpoint_removed, # type: ignore - self._on_tracing_endpoint_removed, + self._workload_tracing.on.endpoint_removed, # type: ignore + self._on_workload_tracing_endpoint_removed, ) observe(self.on.traefik_pebble_ready, self._on_traefik_pebble_ready) # type: ignore @@ -383,20 +417,6 @@ def _on_recv_ca_cert_removed( ): self.traefik.remove_cas([event.relation_id]) - @property - def charm_tracing_endpoint(self) -> Optional[str]: - """Otlp http endpoint for charm instrumentation.""" - if self._tracing.is_ready(): - return self._tracing.get_endpoint("otlp_http") - return None - - @property - def server_cert(self) -> Optional[str]: - """Server certificate path for tls tracing.""" - if self._is_tls_enabled(): - return SERVER_CERT_PATH - return None - def _is_tls_enabled(self) -> bool: """Return True if TLS is enabled.""" if self.cert.enabled: @@ -409,30 +429,17 @@ def _is_tls_enabled(self) -> bool: return True return False - def _is_tracing_enabled(self) -> bool: - """Return True if tracing is enabled.""" - if not self._tracing.is_ready(): - return False - return True + def _on_workload_tracing_endpoint_removed(self, _) -> None: + self._update_config_if_changed() - def _on_tracing_endpoint_removed(self, event) -> None: - if not self.container.can_connect(): - # this probably means we're being torn 
down, so we don't really need to - # clear anything up. We could defer, but again, we're being torn down and the unit db - # will - return - self.traefik.delete_tracing_config() + def _on_workload_tracing_endpoint_changed(self, _) -> None: + self._update_config_if_changed() - def _on_tracing_endpoint_changed(self, event) -> None: - # On slow machines, this event may come up before pebble is ready - if not self.container.can_connect(): - event.defer() - return - - if not self._tracing.is_ready(): - self.traefik.delete_tracing_config() - - self._configure_tracing() + def _is_workload_tracing_ready(self) -> bool: + """Return True if workload tracing is enabled and ready.""" + if not self._workload_tracing.is_ready(): + return False + return True def _on_cert_changed(self, event) -> None: # On slow machines, this event may come up before pebble is ready @@ -522,25 +529,6 @@ def _tcp_entrypoints(self): def _configure_traefik(self): self.traefik.configure() - self._configure_tracing() - - def _configure_tracing(self): - # wokeignore:rule=master - # ref: https://doc.traefik.io/traefik/master/observability/tracing/opentelemetry/ - if not self._is_tracing_enabled(): - logger.info("tracing not enabled: skipping tracing config") - return - - if endpoint := self._tracing.get_endpoint("otlp_http"): - grpc = False - else: - logger.error( - "tracing integration is active but none of the " - "protocols traefik supports is available." - ) - return - - self.traefik.update_tracing_configuration(endpoint, grpc=grpc) def _on_traefik_pebble_ready(self, _: PebbleReadyEvent): # If the Traefik container comes up, e.g., after a pod churn, we @@ -596,8 +584,10 @@ def _config_hash(self) -> int: def _on_config_changed(self, _: ConfigChangedEvent): """Handle the ops.ConfigChanged event.""" - # that we're processing a config-changed event, doesn't necessarily mean that our config has changed (duh!) 
+ self._update_config_if_changed() + def _update_config_if_changed(self): + # that we're processing a config-changed event, doesn't necessarily mean that our config has changed (duh!) # If the config hash has changed since we last calculated it, we need to # recompute our state from scratch, based on all data sent over the relations and all configs new_config_hash = self._config_hash @@ -861,60 +851,47 @@ def _provide_routed_ingress(self, relation: Relation): self._update_dynamic_config_route(relation, dct) def _update_dynamic_config_route(self, relation: Relation, config: dict): - if "http" in config.keys(): - route_config = config["http"].get("routers", {}) - # we want to generate and add a new router with TLS config for each routed path. - # as we mutate the dict, we need to work on a copy - for router_name in route_config.copy().keys(): - route_rule = route_config.get(router_name, {}).get("rule", "") - service_name = route_config.get(router_name, {}).get("service", "") - entrypoints = route_config.get(router_name, {}).get("entryPoints", []) - if len(entrypoints) > 0: - # if entrypoint exists, we check if it's a custom entrypoint to pass it to generated TLS config - entrypoint = entrypoints[0] if entrypoints[0] != "web" else None - else: - entrypoint = None + def _process_routes(route_config, protocol): + for router_name in list(route_config.keys()): # Work on a copy of the keys + router_details = route_config[router_name] + route_rule = router_details.get("rule", "") + service_name = router_details.get("service", "") + entrypoints = router_details.get("entryPoints", []) + tls_config = router_details.get("tls", {}) + + # Skip generating new routes if passthrough is True + if tls_config.get("passthrough", False): + logger.debug( + f"Skipping TLS generation for {protocol} router {router_name} (passthrough True)." 
+ ) + continue + + entrypoint = entrypoints[0] if entrypoints else None + if protocol == "http" and entrypoint == "web": + entrypoint = None # Ignore "web" entrypoint for HTTP if not all([router_name, route_rule, service_name]): - logger.debug("Not enough information to generate a TLS config!") - else: - config["http"]["routers"].update( - self.traefik.generate_tls_config_for_route( - router_name, - route_rule, - service_name, - # we're behind an is_ready guard, so this is guaranteed not to raise - self.external_host, - entrypoint, - ) + logger.debug( + f"Not enough information to generate a TLS config for {protocol} router {router_name}!" ) - if "tcp" in config.keys(): - route_config = config["tcp"].get("routers", {}) - # we want to generate and add a new router with TLS config for each routed path. - # as we mutate the dict, we need to work on a copy - for router_name in route_config.copy().keys(): - route_rule = route_config.get(router_name, {}).get("rule", "") - service_name = route_config.get(router_name, {}).get("service", "") - entrypoints = route_config.get(router_name, {}).get("entryPoints", []) - if len(entrypoints) > 0: - # for grpc, all entrypoints are custom - entrypoint = entrypoints[0] - else: - entrypoint = None + continue - if not all([router_name, route_rule, service_name]): - logger.debug("Not enough information to generate a TLS config!") - else: - config["tcp"]["routers"].update( - self.traefik.generate_tls_config_for_route( - router_name, - route_rule, - service_name, - # we're behind an is_ready guard, so this is guaranteed not to raise - self.external_host, - entrypoint, - ) + config[protocol]["routers"].update( + self.traefik.generate_tls_config_for_route( + router_name, + route_rule, + service_name, + self.external_host, + entrypoint, ) + ) + + if "http" in config: + _process_routes(config["http"].get("routers", {}), protocol="http") + + if "tcp" in config: + _process_routes(config["tcp"].get("routers", {}), protocol="tcp") + 
self._push_configurations(relation, config) def _provide_ingress( @@ -1229,13 +1206,60 @@ def server_cert_sans_dns(self) -> List[str]: # If all else fails, we'd rather use the bare IP return [target] if target else [] + def _validate_config(self): + """Validate the charm configuration, emitting warning messages on misconfigurations. + + In scope for this validation is: + * validating the combination of external_hostname and routing_mode + """ + # FIXME: This will false positive in cases where the LoadBalancer provides an external host rather than an IP. + # The warning will occur, but the charm will function normally. We could better validate the LoadBalancer if + # we want to avoid this, but it probably isn't worth the effort until someone notices. + invalid_hostname_and_routing_mode_message = ( + "Likely configuration error: When using routing_mode=='subdomain', external_hostname should be " + "set. This is because when external_hostname is unset, Traefik uses the LoadBalancer's address as the " + "hostname for all provided URLS and that hostname is typically an IP address. This leads to invalid urls " + "like `model-app.1.2.3.4`. The charm will continue to operate as currently set, but will not provide urls" + " to any related applications if they would be invalid." + ) + + if self.config.get("routing_mode", "") == "subdomain": + # subdomain mode can only be used if an external_hostname is set and is not an IP address + external_hostname = self.config.get("external_hostname", "") + if not isinstance(external_hostname, str) or not is_valid_hostname(external_hostname): + logger.warning(invalid_hostname_and_routing_mode_message) + + +def is_valid_hostname(hostname: str) -> bool: + """Check if a hostname is valid. 
+ + Modified from https://stackoverflow.com/a/33214423 + """ + if len(hostname) == 0: + return False + if hostname[-1] == ".": + # strip exactly one dot from the right, if present + hostname = hostname[:-1] + if len(hostname) > 253: + return False + + labels = hostname.split(".") + + # the TLD must be not all-numeric + if re.match(r"[0-9]+$", labels[-1]): + return False + + allowed = re.compile(r"(?!-)[a-z0-9-]{1,63}(? Optional[str]: client = Client() # type: ignore try: traefik_service = client.get(Service, name=service_name, namespace=namespace) - except ApiError: + except ApiError as e: + logger.warning(f"Got ApiError when trying to get Loadbalancer status: {e}") return None if not (status := traefik_service.status): # type: ignore diff --git a/src/prometheus_alert_rules/unit_unavailable.rule b/src/prometheus_alert_rules/unit_unavailable.rule index efbe9bd6..51c91636 100644 --- a/src/prometheus_alert_rules/unit_unavailable.rule +++ b/src/prometheus_alert_rules/unit_unavailable.rule @@ -1,6 +1,6 @@ alert: TraefikIngressUnitIsUnavailable expr: up < 1 -for: 0m +for: 5m labels: severity: critical annotations: diff --git a/src/traefik.py b/src/traefik.py index 9efbdc7d..53b1f836 100644 --- a/src/traefik.py +++ b/src/traefik.py @@ -3,7 +3,6 @@ # See LICENSE file for licensing details. 
"""Traefik workload interface.""" -import contextlib import dataclasses import enum import logging @@ -16,6 +15,7 @@ import yaml from charms.oathkeeper.v0.forward_auth import ForwardAuthConfig +from cosl import JujuTopology from ops import Container from ops.pebble import LayerDict, PathError @@ -94,6 +94,7 @@ class Traefik: _layer_name = "traefik" service_name = "traefik" + _tracing_endpoint = None def __init__( self, @@ -104,7 +105,9 @@ def __init__( experimental_forward_auth_enabled: bool, tcp_entrypoints: Dict[str, int], traefik_route_static_configs: Iterable[Dict[str, Any]], + topology: JujuTopology, basic_auth_user: Optional[str] = None, + tracing_endpoint: Optional[str] = None, ): self._container = container self._tcp_entrypoints = tcp_entrypoints @@ -112,7 +115,9 @@ def __init__( self._routing_mode = routing_mode self._tls_enabled = tls_enabled self._experimental_forward_auth_enabled = experimental_forward_auth_enabled + self._topology = topology self._basic_auth_user = basic_auth_user + self._tracing_endpoint = tracing_endpoint @property def scrape_jobs(self) -> list: @@ -280,6 +285,18 @@ def generate_static_config(self, _raise: bool = False) -> Dict[str, Any]: }, } + if self._tracing_endpoint: + # ref: https://github.com/traefik/traefik/blob/v2.11/docs/content/observability/tracing/jaeger.md + # TODO once we bump to Traefik v3, Jaeger needs to be replaced with otlp and config needs to be updated + # see https://doc.traefik.io/traefik/observability/tracing/opentelemetry/ for more reference + static_config["tracing"] = { + "jaeger": { + "collector": { + "endpoint": f"{self._tracing_endpoint}/api/traces?format=jaeger.thrift" + }, + } + } + # we attempt to put together the base config with whatever the user passed via traefik_route. # in case there are conflicts between the base config and some route, or between the routes themselves, # we'll be forced to bail out. 
@@ -308,27 +325,6 @@ def push_static_config(self, config: Dict[str, Any]): # TODO Use the Traefik user and group? self._container.push(STATIC_CONFIG_PATH, config_yaml, make_dirs=True) - # wokeignore:rule=master - # ref: https://doc.traefik.io/traefik/master/observability/tracing/opentelemetry/ - def update_tracing_configuration(self, endpoint: str, grpc: bool): - """Push yaml config with opentelemetry configuration.""" - config = yaml.safe_dump( - { - "tracing": { - "openTelemetry": { - "address": endpoint, - **({"grpc": {}} if grpc else {}), - # todo: we have an option to use CA or to use CERT+KEY (available with mtls) authentication. - # when we have mTLS, consider this again. - **({"ca": CA_CERT_PATH} if self._tls_enabled else {"insecure": True}), - } - } - } - ) - logger.debug(f"dumping tracing config to {DYNAMIC_TRACING_PATH}") - - self._container.push(DYNAMIC_TRACING_PATH, config, make_dirs=True) - def get_per_unit_http_config( self, *, @@ -629,6 +625,12 @@ def is_ready(self): def restart(self): """Restart the pebble service.""" + environment = {} + if self._tracing_endpoint: + environment = { + "JAEGER_TAGS": f"juju_application={self._topology.application},juju_model={self._topology.model},juju_model_uuid={self._topology.model_uuid},juju_unit={self._topology.unit},juju_charm={self._topology.charm_name}" + } + layer = { "summary": "Traefik layer", "description": "Pebble config layer for Traefik", @@ -639,15 +641,16 @@ def restart(self): # trick to drop the logs to a file but also keep them available in the pod logs "command": '/bin/sh -c "{} | tee {}"'.format(BIN_PATH, LOG_PATH), "startup": "enabled", + "environment": environment, }, }, } - if not self.is_ready: - self._container.add_layer(self._layer_name, cast(LayerDict, layer), combine=True) - logger.debug(f"replanning {self.service_name!r} after a service update") - self._container.replan() - else: + self._container.add_layer(self._layer_name, cast(LayerDict, layer), combine=True) + 
logger.debug(f"replanning {self.service_name!r} after a service update") + self._container.replan() + + if self.is_ready: logger.debug(f"restarting {self.service_name!r}") self._container.restart(self.service_name) @@ -675,11 +678,6 @@ def add_dynamic_config(self, file_name: str, config: str): logger.debug("Updated dynamic configuration file: %s", file_name) - def delete_tracing_config(self): - """Delete the tracing config yaml.""" - with contextlib.suppress(PathError): - self._container.remove_path(DYNAMIC_TRACING_PATH) - @property def version(self): """Traefik workload version.""" diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 00000000..42b1854d --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,4 @@ +# Terraform module for traefik-k8s + + +This module is in experimental status. It is not yet ready for production. diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 00000000..6b2f6e5a --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,12 @@ +resource "juju_application" "traefik" { + name = var.app_name + model = var.model_name + trust = true + charm { + name = "traefik-k8s" + channel = var.channel + revision = var.revision + } + units = var.units + config = var.config +} \ No newline at end of file diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 00000000..b439439e --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,19 @@ +output "app_name" { + value = juju_application.traefik.name +} + +output "endpoints" { + value = { + # Requires + certificates = "certificates", + experimental_forward_auth = "experimental-forward-auth", + logging = "logging", + tracing = "tracing", + # Provides + grafana_dashboard = "grafana-dashboard", + ingress = "ingress", + ingress_per_unit = "ingress-per-unit", + metrics_endpoint = "metrics-endpoint", + traefik_route = "traefik-route", + } +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 00000000..ac8d649b 
--- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,42 @@ +variable "app_name" { + description = "Application name" + type = string +} + +variable "channel" { + description = "Charm channel" + type = string + default = "latest/stable" +} + +variable "config" { + description = "Config options as in the ones we pass in juju config" + type = map(string) + default = {} +} + +# We use constraints to set AntiAffinity in K8s +# https://discourse.charmhub.io/t/pod-priority-and-affinity-in-juju-charms/4091/13?u=jose +variable "constraints" { + description = "Constraints to be applied" + type = string + default = "" +} + +variable "model_name" { + description = "Model name" + type = string +} + +variable "revision" { + description = "Charm revision" + type = number + nullable = true + default = null +} + +variable "units" { + description = "Number of units" + type = number + default = 1 +} \ No newline at end of file diff --git a/terraform/versions.tf b/terraform/versions.tf new file mode 100644 index 00000000..77b64403 --- /dev/null +++ b/terraform/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 1.5" + required_providers { + juju = { + source = "juju/juju" + version = "~> 0.14" + } + } +} \ No newline at end of file diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 8f70dd7e..4087bb23 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -2,11 +2,17 @@ # See LICENSE file for licensing details. 
import asyncio +import json import logging from typing import Optional +import requests import sh +from juju.application import Application +from juju.unit import Unit +from minio import Minio from pytest_operator.plugin import OpsTest +from tenacity import retry, stop_after_attempt, wait_exponential logger = logging.getLogger(__name__) @@ -94,3 +100,102 @@ def dequote(s: str): if isinstance(s, str) and s.startswith('"') and s.endswith('"'): s = s[1:-1] return s + + +async def deploy_and_configure_minio(ops_test: OpsTest) -> None: + """Deploy and set up minio and s3-integrator needed for s3-like storage backend in the HA charms.""" + config = { + "access-key": "accesskey", + "secret-key": "secretkey", + } + await ops_test.model.deploy("minio", channel="edge", trust=True, config=config) + await ops_test.model.wait_for_idle(apps=["minio"], status="active", timeout=2000) + minio_addr = await get_address(ops_test, "minio", 0) + + mc_client = Minio( + f"{minio_addr}:9000", + access_key="accesskey", + secret_key="secretkey", + secure=False, + ) + + # create tempo bucket + found = mc_client.bucket_exists("tempo") + if not found: + mc_client.make_bucket("tempo") + + # configure s3-integrator + s3_integrator_app: Application = ops_test.model.applications["s3-integrator"] + s3_integrator_leader: Unit = s3_integrator_app.units[0] + + await s3_integrator_app.set_config( + { + "endpoint": f"minio-0.minio-endpoints.{ops_test.model.name}.svc.cluster.local:9000", + "bucket": "tempo", + } + ) + + action = await s3_integrator_leader.run_action("sync-s3-credentials", **config) + action_result = await action.wait() + assert action_result.status == "completed" + + +async def deploy_tempo_cluster(ops_test: OpsTest): + """Deploys tempo in its HA version together with minio and s3-integrator.""" + tempo_app = "tempo" + worker_app = "tempo-worker" + tempo_worker_charm_url, worker_channel = "tempo-worker-k8s", "edge" + tempo_coordinator_charm_url, coordinator_channel = 
"tempo-coordinator-k8s", "edge" + await ops_test.model.deploy( + tempo_worker_charm_url, application_name=worker_app, channel=worker_channel, trust=True + ) + await ops_test.model.deploy( + tempo_coordinator_charm_url, + application_name=tempo_app, + channel=coordinator_channel, + trust=True, + ) + await ops_test.model.deploy("s3-integrator", channel="edge") + + await ops_test.model.integrate(tempo_app + ":s3", "s3-integrator" + ":s3-credentials") + await ops_test.model.integrate(tempo_app + ":tempo-cluster", worker_app + ":tempo-cluster") + + await deploy_and_configure_minio(ops_test) + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle( + apps=[tempo_app, worker_app, "s3-integrator"], + status="active", + timeout=2000, + idle_period=30, + ) + + +def get_traces(tempo_host: str, service_name="tracegen-otlp_http", tls=True): + """Get traces directly from Tempo REST API.""" + url = f"{'https' if tls else 'http'}://{tempo_host}:3200/api/search?tags=service.name={service_name}" + req = requests.get( + url, + verify=False, + ) + assert req.status_code == 200 + traces = json.loads(req.text)["traces"] + return traces + + +@retry(stop=stop_after_attempt(15), wait=wait_exponential(multiplier=1, min=4, max=10)) +async def get_traces_patiently(tempo_host, service_name="tracegen-otlp_http", tls=True): + """Get traces directly from Tempo REST API, but also try multiple times. + + Useful for cases when Tempo might not return the traces immediately (its API is known for returning data in + random order). 
+ """ + traces = get_traces(tempo_host, service_name=service_name, tls=tls) + assert len(traces) > 0 + return traces + + +async def get_application_ip(ops_test: OpsTest, app_name: str) -> str: + """Get the application IP address.""" + status = await ops_test.model.get_status() + app = status["applications"][app_name] + return app.public_address diff --git a/tests/integration/test_workload_tracing.py b/tests/integration/test_workload_tracing.py new file mode 100644 index 00000000..a515bc2c --- /dev/null +++ b/tests/integration/test_workload_tracing.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +from pathlib import Path + +import pytest +import yaml +from helpers import deploy_tempo_cluster, get_application_ip, get_traces_patiently + +logger = logging.getLogger(__name__) + +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +APP_NAME = "traefik" +TEMPO_APP_NAME = "tempo" +RESOURCES = { + "traefik-image": METADATA["resources"]["traefik-image"]["upstream-source"], +} + + +async def test_setup_env(ops_test): + await ops_test.model.set_config({"logging-config": "=WARNING; unit=DEBUG"}) + + +@pytest.mark.abort_on_fail +async def test_workload_tracing_is_present(ops_test, traefik_charm): + logger.info("deploying tempo cluster") + await deploy_tempo_cluster(ops_test) + + logger.info("deploying local charm") + await ops_test.model.deploy( + traefik_charm, resources=RESOURCES, application_name=APP_NAME, trust=True + ) + await ops_test.model.wait_for_idle( + apps=[APP_NAME], status="active", timeout=300, wait_for_exact_units=1 + ) + + # we relate _only_ workload tracing not to confuse with charm traces + await ops_test.model.add_relation( + "{}:workload-tracing".format(APP_NAME), "{}:tracing".format(TEMPO_APP_NAME) + ) + # but we also relate tempo to route through traefik so there's any traffic to generate traces from + await ops_test.model.add_relation( + 
"{}:ingress".format(TEMPO_APP_NAME), "{}:traefik-route".format(APP_NAME) + ) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active") + + # Verify workload traces are ingested into Tempo + assert await get_traces_patiently( + await get_application_ip(ops_test, TEMPO_APP_NAME), + service_name=f"{APP_NAME}", + tls=False, + ) diff --git a/tests/integration/testers/forward-auth/charmcraft.yaml b/tests/integration/testers/forward-auth/charmcraft.yaml index e439b3e7..1fdadc99 100644 --- a/tests/integration/testers/forward-auth/charmcraft.yaml +++ b/tests/integration/testers/forward-auth/charmcraft.yaml @@ -13,6 +13,6 @@ parts: charm-binary-python-packages: - jsonschema - ops - - pydantic-core + - pydantic>=2 build-packages: - git diff --git a/tests/integration/testers/ipa/charmcraft.yaml b/tests/integration/testers/ipa/charmcraft.yaml index c864b362..b4f11527 100644 --- a/tests/integration/testers/ipa/charmcraft.yaml +++ b/tests/integration/testers/ipa/charmcraft.yaml @@ -12,6 +12,6 @@ parts: charm: charm-binary-python-packages: - ops - - pydantic-core + - pydantic>=2 build-packages: - git diff --git a/tests/integration/testers/ipu/charmcraft.yaml b/tests/integration/testers/ipu/charmcraft.yaml index c864b362..b4f11527 100644 --- a/tests/integration/testers/ipu/charmcraft.yaml +++ b/tests/integration/testers/ipu/charmcraft.yaml @@ -12,6 +12,6 @@ parts: charm: charm-binary-python-packages: - ops - - pydantic-core + - pydantic>=2 build-packages: - git diff --git a/tests/integration/testers/route/charmcraft.yaml b/tests/integration/testers/route/charmcraft.yaml index c864b362..b4f11527 100644 --- a/tests/integration/testers/route/charmcraft.yaml +++ b/tests/integration/testers/route/charmcraft.yaml @@ -12,6 +12,6 @@ parts: charm: charm-binary-python-packages: - ops - - pydantic-core + - pydantic>=2 build-packages: - git diff --git a/tests/integration/testers/tcp/charmcraft.yaml b/tests/integration/testers/tcp/charmcraft.yaml index 06980aca..65dafb1c 100644 --- 
a/tests/integration/testers/tcp/charmcraft.yaml +++ b/tests/integration/testers/tcp/charmcraft.yaml @@ -13,6 +13,6 @@ parts: charm-binary-python-packages: - ops - lightkube - - pydantic-core + - pydantic>=2 build-packages: - git diff --git a/tests/scenario/conftest.py b/tests/scenario/conftest.py index 1bc7fce5..51fc1db3 100644 --- a/tests/scenario/conftest.py +++ b/tests/scenario/conftest.py @@ -1,4 +1,4 @@ -from unittest.mock import PropertyMock, patch +from unittest.mock import patch import pytest from ops import pebble @@ -6,7 +6,7 @@ from charm import TraefikIngressCharm -MOCK_EXTERNAL_HOSTNAME = "testhostname" +MOCK_LB_ADDRESS = "1.2.3.4" @pytest.fixture @@ -14,8 +14,8 @@ def traefik_charm(): with patch("charm.KubernetesServicePatch"): with patch("lightkube.core.client.GenericSyncClient"): with patch( - "charm.TraefikIngressCharm._external_host", - PropertyMock(return_value=MOCK_EXTERNAL_HOSTNAME), + "charm._get_loadbalancer_status", + return_value=MOCK_LB_ADDRESS, ): yield TraefikIngressCharm @@ -48,6 +48,7 @@ def traefik_container(tmp_path): ) opt = Mount("/opt/", tmp_path) + etc_traefik = Mount("/etc/traefik/", tmp_path) return Container( name="traefik", @@ -59,5 +60,5 @@ def traefik_container(tmp_path): ("/usr/bin/traefik", "version"): ExecOutput(stdout="42.42"), }, service_status={"traefik": pebble.ServiceStatus.ACTIVE}, - mounts={"opt": opt}, + mounts={"opt": opt, "/etc/traefik": etc_traefik}, ) diff --git a/tests/scenario/test_ingress_per_app.py b/tests/scenario/test_ingress_per_app.py index 89ae9025..878fef1d 100644 --- a/tests/scenario/test_ingress_per_app.py +++ b/tests/scenario/test_ingress_per_app.py @@ -15,9 +15,10 @@ IngressRequirerUnitData, ) from ops import CharmBase, Framework -from scenario import Context, Mount, Relation, State +from scenario import Context, Model, Mount, Relation, State from tests.scenario._utils import create_ingress_relation +from tests.scenario.conftest import MOCK_LB_ADDRESS @pytest.mark.parametrize( @@ -301,3 +302,53 
@@ def test_proxied_endpoints( assert charm.ingress_per_appv1.proxied_endpoints["remote"]["url"] assert charm.ingress_per_appv2.proxied_endpoints["remote"]["url"] assert charm.ingress_per_unit.proxied_endpoints["remote/0"]["url"] + + +MODEL_NAME = "test-model" +UNIT_NAME = "nms" + + +@pytest.mark.parametrize( + "external_hostname, routing_mode, expected_local_app_data", + [ + # Valid configurations + ( + "foo.com", + "path", + {"ingress": json.dumps({"url": f"http://foo.com/{MODEL_NAME}-{UNIT_NAME}"})}, + ), + ( + "foo.com", + "subdomain", + {"ingress": json.dumps({"url": f"http://{MODEL_NAME}-{UNIT_NAME}.foo.com/"})}, + ), + ( + "", + "path", + {"ingress": json.dumps({"url": f"http://{MOCK_LB_ADDRESS}/{MODEL_NAME}-{UNIT_NAME}"})}, + ), + # Invalid configuration, resulting in empty local_app_data + ("", "subdomain", {}), + ], +) +def test_ingress_with_hostname_and_routing_mode( + external_hostname, + routing_mode, + expected_local_app_data, + traefik_ctx, + traefik_container, + tmp_path, +): + """Tests that the ingress relation provides a URL for valid external hostname and routing mode combinations.""" + ipa = create_ingress_relation(strip_prefix=True, unit_name=UNIT_NAME) + state = State( + model=Model(name=MODEL_NAME), + config={"routing_mode": routing_mode, "external_hostname": external_hostname}, + containers=[traefik_container], + relations=[ipa], + leader=True, + ) + + # event = getattr(ipa, f"changed_event") + state_out = traefik_ctx.run("config-changed", state) + assert state_out.relations[0].local_app_data == expected_local_app_data diff --git a/tests/scenario/test_ingress_per_unit.py b/tests/scenario/test_ingress_per_unit.py index 0ab1d7f4..34f65140 100644 --- a/tests/scenario/test_ingress_per_unit.py +++ b/tests/scenario/test_ingress_per_unit.py @@ -1,7 +1,7 @@ import pytest from scenario import Relation, State -from tests.scenario.conftest import MOCK_EXTERNAL_HOSTNAME +from tests.scenario.conftest import MOCK_LB_ADDRESS 
@pytest.mark.parametrize("leader", (True, False)) @@ -48,12 +48,12 @@ def test_ingress_unit_provider_request_response( assert not local_app_data else: if mode == "tcp": - expected_url = f"{MOCK_EXTERNAL_HOSTNAME}:{port}" + expected_url = f"{MOCK_LB_ADDRESS}:{port}" else: prefix = f"{model}-{remote_unit_name.replace('/', '-')}" if routing_mode == "path": - expected_url = f"http://{MOCK_EXTERNAL_HOSTNAME}/{prefix}" + expected_url = f"http://{MOCK_LB_ADDRESS}/{prefix}" else: - expected_url = f"http://{prefix}.{MOCK_EXTERNAL_HOSTNAME}/" + expected_url = f"http://{prefix}.{MOCK_LB_ADDRESS}/" assert local_app_data == {"ingress": f"{remote_unit_name}:\n url: {expected_url}\n"} diff --git a/tests/scenario/test_tracing_integration.py b/tests/scenario/test_tracing_integration.py index e37ecbca..be9a8ece 100644 --- a/tests/scenario/test_tracing_integration.py +++ b/tests/scenario/test_tracing_integration.py @@ -3,15 +3,15 @@ import opentelemetry import pytest import yaml -from charms.tempo_k8s.v1.charm_tracing import charm_tracing_disabled -from charms.tempo_k8s.v2.tracing import ProtocolType, Receiver, TracingProviderAppData +from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing_disabled +from charms.tempo_coordinator_k8s.v0.tracing import ProtocolType, Receiver, TracingProviderAppData from scenario import Relation, State -from traefik import CA_CERT_PATH, DYNAMIC_TRACING_PATH +from traefik import STATIC_CONFIG_PATH @pytest.fixture -def tracing_relation(): +def charm_tracing_relation(): db = {} TracingProviderAppData( receivers=[ @@ -21,14 +21,29 @@ def tracing_relation(): ) ] ).dump(db) - tracing = Relation("tracing", remote_app_data=db) + tracing = Relation("charm-tracing", remote_app_data=db) return tracing -def test_charm_trace_collection(traefik_ctx, traefik_container, caplog, tracing_relation): +@pytest.fixture +def workload_tracing_relation(): + workload_db = {} + TracingProviderAppData( + receivers=[ + Receiver( + url="http://foo.com:14238", + 
protocol=ProtocolType(name="jaeger_thrift_http", type="http"), + ) + ] + ).dump(workload_db) + workload_tracing = Relation("workload-tracing", remote_app_data=workload_db) + return workload_tracing + + +def test_charm_trace_collection(traefik_ctx, traefik_container, caplog, charm_tracing_relation): # GIVEN the presence of a tracing relation - state_in = State(relations=[tracing_relation], containers=[traefik_container]) + state_in = State(relations=[charm_tracing_relation], containers=[traefik_container]) # THEN we get some traces with patch( @@ -36,7 +51,7 @@ def test_charm_trace_collection(traefik_ctx, traefik_container, caplog, tracing_ ) as f: f.return_value = opentelemetry.sdk.trace.export.SpanExportResult.SUCCESS # WHEN traefik receives - traefik_ctx.run(tracing_relation.changed_event, state_in) + traefik_ctx.run(charm_tracing_relation.changed_event, state_in) # assert "Setting up span exporter to endpoint: foo.com:81" in caplog.text # assert "Starting root trace with id=" in caplog.text @@ -46,48 +61,20 @@ def test_charm_trace_collection(traefik_ctx, traefik_container, caplog, tracing_ assert span.resource.attributes["charm_type"] == "TraefikIngressCharm" -def test_traefik_tracing_config(traefik_ctx, traefik_container, tracing_relation): - state_in = State(relations=[tracing_relation], containers=[traefik_container]) +def test_traefik_tracing_config(traefik_ctx, traefik_container, workload_tracing_relation): + state_in = State(relations=[workload_tracing_relation], containers=[traefik_container]) with charm_tracing_disabled(): - traefik_ctx.run(tracing_relation.changed_event, state_in) + traefik_ctx.run(workload_tracing_relation.changed_event, state_in) tracing_cfg = ( - traefik_container.get_filesystem(traefik_ctx) - .joinpath(DYNAMIC_TRACING_PATH[1:]) - .read_text() + traefik_container.get_filesystem(traefik_ctx).joinpath(STATIC_CONFIG_PATH[1:]).read_text() ) cfg = yaml.safe_load(tracing_cfg) - assert cfg == { - "tracing": { - "openTelemetry": { - 
"address": "http://foo.com:81", - "insecure": True, - } - } - } - - -def test_traefik_tracing_config_with_tls(traefik_ctx, traefik_container, tracing_relation): - state_in = State(relations=[tracing_relation], containers=[traefik_container]) - - with patch("charm.TraefikIngressCharm._is_tls_enabled") as tls_enabled: - tls_enabled.return_value = "True" - - with charm_tracing_disabled(): - traefik_ctx.run(tracing_relation.changed_event, state_in) - - tracing_cfg = ( - traefik_container.get_filesystem(traefik_ctx) - .joinpath(DYNAMIC_TRACING_PATH[1:]) - .read_text() - ) - cfg = yaml.safe_load(tracing_cfg) - assert cfg == { - "tracing": { - "openTelemetry": { - "address": "http://foo.com:81", - "ca": CA_CERT_PATH, + assert cfg["tracing"] == { + "jaeger": { + "collector": { + "endpoint": "http://foo.com:14238/api/traces?format=jaeger.thrift", } } } @@ -95,42 +82,48 @@ def test_traefik_tracing_config_with_tls(traefik_ctx, traefik_container, tracing @pytest.mark.parametrize("was_present_before", (True, False)) def test_traefik_tracing_config_removed_if_relation_data_invalid( - traefik_ctx, traefik_container, tracing_relation, was_present_before + traefik_ctx, traefik_container, workload_tracing_relation, was_present_before ): if was_present_before: - dt_path = traefik_container.mounts["opt"].src.joinpath("traefik", "juju", "tracing.yaml") - dt_path.parent.mkdir(parents=True) + dt_path = traefik_container.mounts["/etc/traefik"].src.joinpath("traefik.yaml") + if not dt_path.parent.exists(): + dt_path.parent.mkdir(parents=True) dt_path.write_text("foo") state_in = State( - relations=[tracing_relation.replace(remote_app_data={"foo": "bar"})], + relations=[workload_tracing_relation.replace(remote_app_data={"foo": "bar"})], containers=[traefik_container], ) with charm_tracing_disabled(): - traefik_ctx.run(tracing_relation.changed_event, state_in) + traefik_ctx.run(workload_tracing_relation.changed_event, state_in) - # assert file is not there - assert ( - not 
traefik_container.get_filesystem(traefik_ctx).joinpath(DYNAMIC_TRACING_PATH).exists() + tracing_cfg = ( + traefik_container.get_filesystem(traefik_ctx).joinpath(STATIC_CONFIG_PATH[1:]).read_text() ) + cfg = yaml.safe_load(tracing_cfg) + # assert tracing config is removed + assert "tracing" not in cfg @pytest.mark.parametrize("was_present_before", (True, False)) def test_traefik_tracing_config_removed_on_relation_broken( - traefik_ctx, traefik_container, tracing_relation, was_present_before + traefik_ctx, traefik_container, workload_tracing_relation, was_present_before ): if was_present_before: - dt_path = traefik_container.mounts["opt"].src.joinpath("traefik", "juju", "tracing.yaml") - dt_path.parent.mkdir(parents=True) + dt_path = traefik_container.mounts["/etc/traefik"].src.joinpath("traefik.yaml") + if not dt_path.parent.exists(): + dt_path.parent.mkdir(parents=True) dt_path.write_text("foo") - state_in = State(relations=[tracing_relation], containers=[traefik_container]) + state_in = State(relations=[workload_tracing_relation], containers=[traefik_container]) with charm_tracing_disabled(): - traefik_ctx.run(tracing_relation.broken_event, state_in) + traefik_ctx.run(workload_tracing_relation.broken_event, state_in) - # assert file is not there - assert ( - not traefik_container.get_filesystem(traefik_ctx).joinpath(DYNAMIC_TRACING_PATH).exists() + tracing_cfg = ( + traefik_container.get_filesystem(traefik_ctx).joinpath(STATIC_CONFIG_PATH[1:]).read_text() ) + cfg = yaml.safe_load(tracing_cfg) + # assert tracing config is removed + assert "tracing" not in cfg diff --git a/tests/unit/test_route.py b/tests/unit/test_route.py index 38f80344..efdc843a 100644 --- a/tests/unit/test_route.py +++ b/tests/unit/test_route.py @@ -1,11 +1,13 @@ # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. 
"""Helpers for unit testing charms which use this library.""" +import uuid from unittest.mock import Mock, patch import ops import pytest import yaml +from cosl import JujuTopology from ops.testing import Harness from charm import TraefikIngressCharm @@ -33,6 +35,42 @@ } } +TCP_CONFIG_WITH_PASSTHROUGH = { + "tcp": { + "routers": { + "juju-foo-router": { + "entryPoints": ["websecure"], + "rule": "HostSNI(`*`)", + "service": "juju-foo-service", + "tls": {"passthrough": True}, # Passthrough enabled + } + }, + "services": { + "juju-foo-service": { + "loadBalancer": {"servers": [{"address": "foo.testmodel-endpoints.local:8080"}]} + } + }, + } +} + +HTTP_CONFIG_WITH_PASSTHROUGH = { + "http": { + "routers": { + "juju-foo-router": { + "entryPoints": ["web"], + "rule": "PathPrefix(`/path`)", + "service": "juju-foo-service", + "tls": {"passthrough": True}, # Passthrough enabled + } + }, + "services": { + "juju-foo-service": { + "loadBalancer": {"servers": [{"url": "http://foo.testmodel-endpoints.local:8080"}]} + } + }, + } +} + CONFIG_WITH_TLS = { "http": { "routers": { @@ -80,6 +118,17 @@ def harness() -> Harness[TraefikIngressCharm]: harness.cleanup() +@pytest.fixture(scope="function") +def topology(harness): + topology = JujuTopology( + model="model", + model_uuid=str(uuid.uuid4()), + application="app", + charm_name="charm", + ) + return topology + + @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def initialize_and_setup_tr_relation(harness): harness.update_config({"external_hostname": "testhostname"}) @@ -146,7 +195,7 @@ def test_tls_is_added(harness: Harness[TraefikIngressCharm]): assert conf == CONFIG_WITH_TLS -def test_static_config(harness: Harness[TraefikIngressCharm]): +def test_static_config(harness: Harness[TraefikIngressCharm], topology: JujuTopology): tr_relation_id, relation = initialize_and_setup_tr_relation(harness) config = yaml.dump(CONFIG) static = yaml.safe_dump({"foo": "bar"}) @@ -171,6 +220,7 @@ def test_static_config(harness: 
Harness[TraefikIngressCharm]): tls_enabled=charm._is_tls_enabled(), experimental_forward_auth_enabled=charm._is_forward_auth_enabled, traefik_route_static_configs=charm._traefik_route_static_configs(), + topology=topology, ) charm.traefik_route.on.ready.emit(charm.model.get_relation("traefik-route")) @@ -189,7 +239,7 @@ def test_static_config(harness: Harness[TraefikIngressCharm]): assert yaml.safe_load(charm.container.pull(file).read()) == CONFIG_WITH_TLS -def test_static_config_broken(harness: Harness[TraefikIngressCharm]): +def test_static_config_broken(harness: Harness[TraefikIngressCharm], topology: JujuTopology): tr_relation_id, relation = initialize_and_setup_tr_relation(harness) config = yaml.dump(CONFIG) @@ -219,6 +269,7 @@ def test_static_config_broken(harness: Harness[TraefikIngressCharm]): tls_enabled=charm._is_tls_enabled(), experimental_forward_auth_enabled=charm._is_forward_auth_enabled, traefik_route_static_configs=charm._traefik_route_static_configs(), + topology=topology, ) # WHEN the charm receives a traefik-route ready event @@ -241,7 +292,9 @@ def test_static_config_broken(harness: Harness[TraefikIngressCharm]): assert yaml.safe_load(charm.container.pull(file).read()) == CONFIG_WITH_TLS -def test_static_config_partially_broken(harness: Harness[TraefikIngressCharm]): +def test_static_config_partially_broken( + harness: Harness[TraefikIngressCharm], topology: JujuTopology +): initialize_and_setup_tr_relation(harness) # IF we initialize Traefik with some specially crafted @@ -263,6 +316,7 @@ def test_static_config_partially_broken(harness: Harness[TraefikIngressCharm]): # GOOD: this one won't conflict with other entrypoints {"entryPoints": {"shondaland": {"address": ":6767"}}}, ], + topology=topology, ) # WHEN the charm receives a traefik-route ready event @@ -283,7 +337,9 @@ def test_static_config_partially_broken(harness: Harness[TraefikIngressCharm]): assert generated_config["foo"] == {"bar": "baz"} -def 
test_static_config_updates_tcp_entrypoints(harness: Harness[TraefikIngressCharm]): +def test_static_config_updates_tcp_entrypoints( + harness: Harness[TraefikIngressCharm], topology: JujuTopology +): tr_relation_id, relation = initialize_and_setup_tr_relation(harness) config = yaml.dump(CONFIG) static = yaml.safe_dump({"entryPoints": {"shondaland": {"address": ":6767"}}}) @@ -308,6 +364,7 @@ def test_static_config_updates_tcp_entrypoints(harness: Harness[TraefikIngressCh tls_enabled=charm._is_tls_enabled(), experimental_forward_auth_enabled=charm._is_forward_auth_enabled, traefik_route_static_configs=charm._traefik_route_static_configs(), + topology=topology, ) charm.traefik_route.on.ready.emit(charm.model.get_relation("traefik-route")) @@ -318,3 +375,51 @@ def test_static_config_updates_tcp_entrypoints(harness: Harness[TraefikIngressCh # AND that shows up in the service ports assert [p for p in charm._service_ports if p.port == 6767][0] + + +def test_tls_http_passthrough_no_tls_added(harness: Harness[TraefikIngressCharm]): + """Ensure no TLS configuration is generated for routes with tls.passthrough.""" + tr_relation_id, relation = initialize_and_setup_tr_relation(harness) + charm = harness.charm + + # Update relation with the passthrough configuration + config = yaml.dump(HTTP_CONFIG_WITH_PASSTHROUGH) + harness.update_relation_data(tr_relation_id, REMOTE_APP_NAME, {"config": config}) + + # Verify the relation is ready and the configuration is loaded + assert charm.traefik_route.is_ready(relation) + assert charm.traefik_route.get_config(relation) == config + + # Check the dynamic configuration written to the container + file = f"/opt/traefik/juju/juju_ingress_{relation.name}_{relation.id}_{relation.app.name}.yaml" + dynamic_config = yaml.safe_load(charm.container.pull(file).read()) + + # Ensure the passthrough configuration is preserved + assert dynamic_config == HTTP_CONFIG_WITH_PASSTHROUGH + + # Check no additional TLS configurations are added + assert 
"juju-foo-router-tls" not in dynamic_config["http"]["routers"] + + +def test_tls_tcp_passthrough_no_tls_added(harness: Harness[TraefikIngressCharm]): + """Ensure no TLS configuration is generated for routes with tls.passthrough.""" + tr_relation_id, relation = initialize_and_setup_tr_relation(harness) + charm = harness.charm + + # Update relation with the passthrough configuration + config = yaml.dump(TCP_CONFIG_WITH_PASSTHROUGH) + harness.update_relation_data(tr_relation_id, REMOTE_APP_NAME, {"config": config}) + + # Verify the relation is ready and the configuration is loaded + assert charm.traefik_route.is_ready(relation) + assert charm.traefik_route.get_config(relation) == config + + # Check the dynamic configuration written to the container + file = f"/opt/traefik/juju/juju_ingress_{relation.name}_{relation.id}_{relation.app.name}.yaml" + dynamic_config = yaml.safe_load(charm.container.pull(file).read()) + + # Ensure the passthrough configuration is preserved + assert dynamic_config == TCP_CONFIG_WITH_PASSTHROUGH + + # Check no additional TLS configurations are added + assert "juju-foo-router-tls" not in dynamic_config["tcp"]["routers"] diff --git a/tox.ini b/tox.ini index 395ea929..e1973272 100644 --- a/tox.ini +++ b/tox.ini @@ -67,6 +67,7 @@ deps = # fix for https://github.com/jd/tenacity/issues/471 tenacity==8.3.0 sh + minio -r{toxinidir}/requirements.txt commands = pytest -v --tb native --log-cli-level=INFO -s {[vars]tst_path}/integration {posargs} @@ -96,8 +97,9 @@ commands = description = Run interface tests deps = pytest - ops-scenario>=5.3.1 + ops-scenario~=6.0 pytest-interface-tester > 0.3 -r{toxinidir}/requirements.txt commands = - pytest -v --tb native {[vars]tst_path}/interface --log-cli-level=INFO -s {posargs} + # todo uncomment once scenario v7 migration on interface tester is complete + # pytest -v --tb native {[vars]tst_path}/interface --log-cli-level=INFO -s {posargs} diff --git a/traefik-k8s b/traefik-k8s deleted file mode 100644 index 
c4b5e865..00000000 Binary files a/traefik-k8s and /dev/null differ