From 0d55ce1019c386ca06ab90b958d2157fc56ee80b Mon Sep 17 00:00:00 2001 From: Github Actions Date: Tue, 14 Nov 2023 16:01:08 +0000 Subject: [PATCH] chore: update charm libraries --- .../grafana_k8s/v0/grafana_dashboard.py | 63 +- lib/charms/grafana_k8s/v0/grafana_source.py | 67 +- .../harness_extensions/v0/evt_sequences.py | 950 +----------------- .../v0/prometheus_remote_write.py | 10 +- .../prometheus_k8s/v0/prometheus_scrape.py | 281 +----- 5 files changed, 127 insertions(+), 1244 deletions(-) diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py index 7e088d4..1f1bc4f 100644 --- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py +++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py @@ -219,7 +219,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 30 +LIBPATCH = 35 logger = logging.getLogger(__name__) @@ -525,7 +525,7 @@ def _validate_relation_by_interface_and_direction( relation = charm.meta.relations[relation_name] actual_relation_interface = relation.interface_name - if actual_relation_interface != expected_relation_interface: + if actual_relation_interface and actual_relation_interface != expected_relation_interface: raise RelationInterfaceMismatchError( relation_name, expected_relation_interface, actual_relation_interface ) @@ -665,14 +665,14 @@ def _template_panels( continue if not existing_templates: datasource = panel.get("datasource") - if type(datasource) == str: + if isinstance(datasource, str): if "loki" in datasource: panel["datasource"] = "${lokids}" elif "grafana" in datasource: continue else: panel["datasource"] = "${prometheusds}" - elif type(datasource) == dict: + elif isinstance(datasource, dict): # In dashboards exported by Grafana 9, datasource type is dict dstype = datasource.get("type", "") if dstype == "loki": @@ -686,7 +686,7 @@ def _template_panels( logger.error("Unknown datasource format: skipping") continue else: - if type(panel["datasource"]) == str: + if isinstance(panel["datasource"], str): if panel["datasource"].lower() in replacements.values(): # Already a known template variable continue @@ -701,7 +701,7 @@ def _template_panels( if replacement: used_replacements.append(ds) panel["datasource"] = replacement or panel["datasource"] - elif type(panel["datasource"]) == dict: + elif isinstance(panel["datasource"], dict): dstype = panel["datasource"].get("type", "") if panel["datasource"].get("uid", "").lower() in replacements.values(): # Already a known template variable @@ -790,7 +790,7 @@ def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str: # We need to use an index so we can insert the changed element back later for panel_idx, panel in enumerate(panels): - if type(panel) is not dict: + if not isinstance(panel, dict): continue # Use the index to insert it back in the same location @@ -831,11 +831,11 @@ def _modify_panel(panel: dict, topology: dict, transformer: "CosTool") -> dict: if "datasource" not in panel.keys(): continue - if type(panel["datasource"]) == str: + if isinstance(panel["datasource"], str): if panel["datasource"] not in known_datasources: continue querytype = known_datasources[panel["datasource"]] - elif type(panel["datasource"]) == dict: + elif isinstance(panel["datasource"], dict): if panel["datasource"]["uid"] not in known_datasources: continue querytype = known_datasources[panel["datasource"]["uid"]] @@ -955,7 +955,7 @@ def restore(self, 
snapshot): """Restore grafana source information.""" self.error_message = snapshot["error_message"] self.valid = snapshot["valid"] - self.errors = json.loads(snapshot["errors"]) + self.errors = json.loads(str(snapshot["errors"])) class GrafanaProviderEvents(ObjectEvents): @@ -968,7 +968,7 @@ class GrafanaDashboardProvider(Object): """An API to provide Grafana dashboards to a Grafana charm.""" _stored = StoredState() - on = GrafanaProviderEvents() + on = GrafanaProviderEvents() # pyright: ignore def __init__( self, @@ -1072,7 +1072,7 @@ def add_dashboard(self, content: str, inject_dropdowns: bool = True) -> None: """ # Update of storage must be done irrespective of leadership, so # that the stored state is there when this unit becomes leader. - stored_dashboard_templates = self._stored.dashboard_templates # type: Any + stored_dashboard_templates: Any = self._stored.dashboard_templates # pyright: ignore encoded_dashboard = _encode_dashboard_content(content) @@ -1093,7 +1093,7 @@ def remove_non_builtin_dashboards(self) -> None: """Remove all dashboards from the relation added via :method:`add_dashboard`.""" # Update of storage must be done irrespective of leadership, so # that the stored state is there when this unit becomes leader. - stored_dashboard_templates = self._stored.dashboard_templates # type: Any + stored_dashboard_templates: Any = self._stored.dashboard_templates # pyright: ignore for dashboard_id in list(stored_dashboard_templates.keys()): if dashboard_id.startswith("prog:"): @@ -1120,7 +1120,7 @@ def _update_all_dashboards_from_dir( # Ensure we do not leave outdated dashboards by removing from stored all # the encoded dashboards that start with "file/". if self._dashboards_path: - stored_dashboard_templates = self._stored.dashboard_templates # type: Any + stored_dashboard_templates: Any = self._stored.dashboard_templates # pyright: ignore for dashboard_id in list(stored_dashboard_templates.keys()): if dashboard_id.startswith("file:"): @@ -1174,7 +1174,7 @@ def _reinitialize_dashboard_data(self, inject_dropdowns: bool = True) -> None: e.grafana_dashboards_absolute_path, e.message, ) - stored_dashboard_templates = self._stored.dashboard_templates # type: Any + stored_dashboard_templates: Any = self._stored.dashboard_templates # pyright: ignore for dashboard_id in list(stored_dashboard_templates.keys()): if dashboard_id.startswith("file:"): @@ -1195,6 +1195,7 @@ def _on_grafana_dashboard_relation_created(self, event: RelationCreatedEvent) -> `grafana_dashboard` relation is joined """ if self._charm.unit.is_leader(): + self._update_all_dashboards_from_dir() self._upset_dashboards_on_relation(event.relation) def _on_grafana_dashboard_relation_changed(self, event: RelationChangedEvent) -> None: @@ -1212,16 +1213,18 @@ def _on_grafana_dashboard_relation_changed(self, event: RelationChangedEvent) -> valid = bool(data.get("valid", True)) errors = data.get("errors", []) if valid and not errors: - self.on.dashboard_status_changed.emit(valid=valid) + self.on.dashboard_status_changed.emit(valid=valid) # pyright: ignore else: - self.on.dashboard_status_changed.emit( # pyright: ignore valid=valid, errors=errors ) def _upset_dashboards_on_relation(self, relation: Relation) -> None: """Update the dashboards in the relation data bucket.""" # It's completely ridiculous to add a UUID, but if we don't have some # pseudo-random value, this never makes it across 'juju set-state' stored_data = { "templates": 
_type_convert_stored(self._stored.dashboard_templates), + "templates": _type_convert_stored(self._stored.dashboard_templates), # pyright: ignore "uuid": str(uuid.uuid4()), } @@ -1256,7 +1259,7 @@ def dashboard_templates(self) -> List: class GrafanaDashboardConsumer(Object): """A consumer object for working with Grafana Dashboards.""" - on = GrafanaDashboardEvents() + on = GrafanaDashboardEvents() # pyright: ignore _stored = StoredState() def __init__( @@ -1348,13 +1351,13 @@ def _on_grafana_dashboard_relation_changed(self, event: RelationChangedEvent) -> changes = self._render_dashboards_and_signal_changed(event.relation) if changes: - self.on.dashboards_changed.emit() + self.on.dashboards_changed.emit() # pyright: ignore def _on_grafana_peer_changed(self, _: RelationChangedEvent) -> None: """Emit dashboard events on peer events so secondary charm data updates.""" if self._charm.unit.is_leader(): return - self.on.dashboards_changed.emit() + self.on.dashboards_changed.emit() # pyright: ignore def update_dashboards(self, relation: Optional[Relation] = None) -> None: """Re-establish dashboards on one or more relations. @@ -1401,7 +1404,7 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool: # """ other_app = relation.app - raw_data = relation.data[other_app].get("dashboards", {}) # type: ignore + raw_data = relation.data[other_app].get("dashboards", "") # pyright: ignore if not raw_data: logger.warning( @@ -1509,12 +1512,12 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool: # def _manage_dashboard_uid(self, dashboard: str, template: dict) -> str: """Add an uid to the dashboard if it is not present.""" - dashboard = json.loads(dashboard) + dashboard_dict = json.loads(dashboard) - if not dashboard.get("uid", None) and "dashboard_alt_uid" in template: - dashboard["uid"] = template["dashboard_alt_uid"] + if not dashboard_dict.get("uid", None) and "dashboard_alt_uid" in template: + dashboard_dict["uid"] = template["dashboard_alt_uid"] - return json.dumps(dashboard) + return json.dumps(dashboard_dict) def _remove_all_dashboards_for_relation(self, relation: Relation) -> None: """If an errored dashboard is in stored data, remove it and trigger a deletion.""" @@ -1522,7 +1525,7 @@ def _remove_all_dashboards_for_relation(self, relation: Relation) -> None: stored_dashboards = self.get_peer_data("dashboards") stored_dashboards.pop(str(relation.id)) self.set_peer_data("dashboards", stored_dashboards) - self.on.dashboards_changed.emit() + self.on.dashboards_changed.emit() # pyright: ignore def _to_external_object(self, relation_id, dashboard): return { @@ -1604,7 +1607,7 @@ class GrafanaDashboardAggregator(Object): """ _stored = StoredState() - on = GrafanaProviderEvents() + on = GrafanaProviderEvents() # pyright: ignore def __init__( self, @@ -1669,7 +1672,7 @@ def _update_remote_grafana(self, _: Optional[RelationEvent] = None) -> None: """Push dashboards to the downstream Grafana relation.""" # It's still ridiculous to add a UUID here, but needed stored_data = { - "templates": _type_convert_stored(self._stored.dashboard_templates), + "templates": _type_convert_stored(self._stored.dashboard_templates), # pyright: ignore "uuid": str(uuid.uuid4()), } @@ -1690,7 +1693,7 @@ def remove_dashboards(self, event: RelationBrokenEvent) -> None: del self._stored.dashboard_templates[id] # type: ignore stored_data = { - "templates": _type_convert_stored(self._stored.dashboard_templates), + "templates": _type_convert_stored(self._stored.dashboard_templates), # 
pyright: ignore "uuid": str(uuid.uuid4()), } diff --git a/lib/charms/grafana_k8s/v0/grafana_source.py b/lib/charms/grafana_k8s/v0/grafana_source.py index edc4e82..6d411b3 100644 --- a/lib/charms/grafana_k8s/v0/grafana_source.py +++ b/lib/charms/grafana_k8s/v0/grafana_source.py @@ -160,7 +160,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 15 +LIBPATCH = 19 logger = logging.getLogger(__name__) @@ -169,7 +169,7 @@ def __init__(self, *args): RELATION_INTERFACE_NAME = "grafana_datasource" -def _type_convert_stored(obj): +def _type_convert_stored(obj) -> Union[dict, list]: """Convert Stored* to their appropriate types, recursively.""" if isinstance(obj, StoredList): return list(map(_type_convert_stored, obj)) @@ -259,7 +259,7 @@ def _validate_relation_by_interface_and_direction( relation = charm.meta.relations[relation_name] actual_relation_interface = relation.interface_name - if actual_relation_interface != expected_relation_interface: + if actual_relation_interface and actual_relation_interface != expected_relation_interface: raise RelationInterfaceMismatchError( relation_name, expected_relation_interface, actual_relation_interface ) @@ -321,7 +321,7 @@ def __init__( source_type: str, source_port: Optional[str] = "", source_url: Optional[str] = "", - refresh_event: Optional[BoundEvent] = None, + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, relation_name: str = DEFAULT_RELATION_NAME, extra_fields: Optional[dict] = None, ) -> None: @@ -359,7 +359,7 @@ def __init__( the default, so that people deploying your charm will have a consistent experience with all other charms that provide Grafana datasources. - refresh_event: a :class:`CharmEvents` event on which the IP + refresh_event: a :class:`CharmEvents` event (or a list of them) on which the IP address should be refreshed in case of pod or machine/VM restart. extra_fields: a :dict: which is used for additional information required @@ -386,7 +386,11 @@ def __init__( if not refresh_event: if len(self._charm.meta.containers) == 1: container = list(self._charm.meta.containers.values())[0] - refresh_event = self._charm.on[container.name.replace("-", "_")].pebble_ready + refresh_event = [self._charm.on[container.name.replace("-", "_")].pebble_ready] + else: + refresh_event = [] + elif not isinstance(refresh_event, list): + refresh_event = [refresh_event] if source_port and source_url: logger.warning( @@ -394,31 +398,30 @@ def __init__( "`source_url` as the address." ) + self._source_port = source_port + self._source_url = self._sanitize_source_url(source_url) + + self.framework.observe(events.relation_joined, self._set_sources_from_event) + for ev in refresh_event: + self.framework.observe(ev, self._set_unit_details) + + def _sanitize_source_url(self, source_url: Optional[str]) -> Optional[str]: if source_url and not re.match(r"^\w+://", source_url): logger.warning( "'source_url' should start with a scheme, such as " "'http://'. Assuming 'http://' since none is present." 
) source_url = "http://{}".format(source_url) - - self._source_port = source_port - self._source_url = source_url - - self.framework.observe(events.relation_joined, self._set_sources_from_event) - if refresh_event: - self.framework.observe(refresh_event, self._set_unit_details) + return source_url def update_source(self, source_url: Optional[str] = ""): """Trigger the update of relation data.""" - if source_url: - self._source_url = source_url - - rel = self._charm.model.get_relation(self._relation_name) - - if not rel: - return + self._source_url = self._sanitize_source_url(source_url) - self._set_sources(rel) + for rel in self._charm.model.relations.get(self._relation_name, []): + if not rel: + continue + self._set_sources(rel) def _set_sources_from_event(self, event: RelationJoinedEvent) -> None: """Get a `Relation` object from the event to pass on.""" @@ -457,7 +460,7 @@ def _set_unit_details(self, _: Union[BoundEvent, RelationEvent, Relation]): unit relation data for the Prometheus consumer. """ for relation in self._charm.model.relations[self._relation_name]: - url = self._source_url or "{}:{}".format(socket.getfqdn(), self._source_port) + url = self._source_url or "http://{}:{}".format(socket.getfqdn(), self._source_port) if self._source_type == "mimir": url = f"{url}/prometheus" @@ -467,7 +470,7 @@ def _set_unit_details(self, _: Union[BoundEvent, RelationEvent, Relation]): class GrafanaSourceConsumer(Object): """A consumer object for working with Grafana datasources.""" - on = GrafanaSourceEvents() + on = GrafanaSourceEvents() # pyright: ignore _stored = StoredState() def __init__( @@ -532,14 +535,14 @@ def _on_grafana_source_relation_changed(self, event: Optional[CharmEvents] = Non self.set_peer_data("sources", sources) - self.on.sources_changed.emit() + self.on.sources_changed.emit() # pyright: ignore def _on_grafana_peer_changed(self, _: RelationChangedEvent) -> None: """Emit source events on peer events so secondary charm data updates.""" if self._charm.unit.is_leader(): return - self.on.sources_changed.emit() - self.on.sources_to_delete_changed.emit() + self.on.sources_changed.emit() # pyright: ignore + self.on.sources_to_delete_changed.emit() # pyright: ignore def _get_source_config(self, rel: Relation): """Generate configuration from data stored in relation data by providers.""" @@ -610,7 +613,7 @@ def _on_grafana_source_relation_departed(self, event: RelationDepartedEvent) -> removed_source = self._remove_source_from_datastore(event) if removed_source: - self.on.sources_to_delete_changed.emit() + self.on.sources_to_delete_changed.emit() # pyright: ignore def _remove_source_from_datastore(self, event: RelationDepartedEvent) -> bool: """Remove the grafana-source from the datastore. 
@@ -658,7 +661,7 @@ def upgrade_keys(self) -> None: return self._set_default_data() - sources = _type_convert_stored(self._stored.sources) + sources: dict = _type_convert_stored(self._stored.sources) # pyright: ignore for rel_id in sources.keys(): for i in range(len(sources[rel_id])): sources[rel_id][i].update( @@ -673,10 +676,14 @@ def upgrade_keys(self) -> None: self.set_peer_data("sources", sources) if self._stored.sources_to_delete: # type: ignore - old_sources_to_delete = _type_convert_stored(self._stored.sources_to_delete) + old_sources_to_delete = _type_convert_stored( + self._stored.sources_to_delete # pyright: ignore + ) self._stored.sources_to_delete = set() peer_sources_to_delete = set(self.get_peer_data("sources_to_delete")) - sources_to_delete = set.union(old_sources_to_delete, peer_sources_to_delete) + sources_to_delete = set.union( + old_sources_to_delete, peer_sources_to_delete # pyright: ignore + ) self.set_peer_data("sources_to_delete", sources_to_delete) def update_sources(self, relation: Optional[Relation] = None) -> None: diff --git a/lib/charms/harness_extensions/v0/evt_sequences.py b/lib/charms/harness_extensions/v0/evt_sequences.py index bc84e5b..0d1fbc7 100644 --- a/lib/charms/harness_extensions/v0/evt_sequences.py +++ b/lib/charms/harness_extensions/v0/evt_sequences.py @@ -1,4 +1,8 @@ -'''This is a library providing a utility for unit testing event sequences with the harness. +'''This was a library providing a utility for unit testing event sequences with the harness. +this charm library has been deprecated and is replaced by ops-scenario. +To learn more visit: https://github.com/canonical/ops-scenario +or ask on mattermost: +https://chat.charmhub.io/charmhub/channels/charm-dev ''' # The unique Charmhub library identifier, never change it @@ -9,946 +13,16 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 1 +LIBPATCH = 2 -import dataclasses -import json -from dataclasses import dataclass -from functools import partial -from uuid import uuid4 -from typing import Tuple, Any, Dict, Union, Iterable -import ops -import yaml -from ops.testing import CharmType - -if __name__ == '__main__': - pass # to prevent isort from complaining about what follows - -# from networking: import logging -from collections import defaultdict -from contextlib import contextmanager -from copy import deepcopy -from typing import Dict, List, Optional, TypedDict, Union, TextIO, Sequence, Callable - -from ops.model import Relation - -network_logger = logging.getLogger("networking") -CharmMeta = Optional[Union[str, TextIO, dict]] -PlayResult = Tuple['BoundEvent', 'Context', 'Emitter'] -AssertionType = Callable[['BoundEvent', 'Context', 'Emitter'], Optional[bool]] - - -class NetworkingError(RuntimeError): - """Base class for errors raised from this module.""" - -JUJU_INFO = { - "bind-addresses": [ - { - "mac-address": "", - "interface-name": "", - "interfacename": "", - "addresses": [{"hostname": "", "value": "1.1.1.1", "cidr": ""}], - } - ], - "bind-address": "1.1.1.1", - "egress-subnets": ["1.1.1.2/32"], - "ingress-addresses": ["1.1.1.2"], -} # type: _Network - -_Address = TypedDict("_Address", {"hostname": str, "value": str, "cidr": str}) -_BindAddress = TypedDict( - "_BindAddress", - { - "mac-address": str, - "interface-name": str, - "interfacename": str, # ? 
- "addresses": List[_Address], - }, -) -_Network = TypedDict( - "_Network", - { - "bind-addresses": List[_BindAddress], - "bind-address": str, - "egress-subnets": List[str], - "ingress-addresses": List[str], - }, +logging.getLogger(__name__).warning( + "DEPRECATION NOTICE: this charm library has been deprecated and is replaced by ops-scenario. \n" + "To learn more visit: https://github.com/canonical/ops-scenario \n" + "or ask on mattermost: \n" + "https://chat.charmhub.io/charmhub/channels/charm-dev\n" + " 'T was fun." ) - -def activate(juju_info_network: "_Network" = JUJU_INFO): - """Patches harness.backend.network_get and initializes the juju-info binding.""" - global PATCH_ACTIVE, _NETWORKS - if PATCH_ACTIVE: - raise NetworkingError("patch already active") - assert not _NETWORKS # type guard - - from ops.testing import _TestingModelBackend - - _NETWORKS = defaultdict(dict) - _TestingModelBackend.network_get = _network_get # type: ignore - _NETWORKS["juju-info"][None] = juju_info_network - - PATCH_ACTIVE = True - - -def deactivate(): - """Undoes the patch.""" - global PATCH_ACTIVE, _NETWORKS - assert PATCH_ACTIVE, "patch not active" - - PATCH_ACTIVE = False - _NETWORKS = None # type: ignore - - -_NETWORKS = None # type: Optional[Dict[str, Dict[Optional[int], _Network]]] -PATCH_ACTIVE = False - - -def _network_get(_, endpoint_name, relation_id=None) -> _Network: - if not PATCH_ACTIVE: - raise NotImplementedError("network-get") - assert _NETWORKS # type guard - - try: - endpoints = _NETWORKS[endpoint_name] - network = endpoints.get(relation_id) - if not network: - # fall back to default binding for relation: - return endpoints[None] - return network - except KeyError as e: - raise NetworkingError( - f"No network for {endpoint_name} -r {relation_id}; " - f"try `add_network({endpoint_name}, {relation_id} | None, Network(...))`" - ) from e - - -def add_network( - endpoint_name: str, - relation_id: Optional[int], - network: _Network, - make_default=False, -): - """Add a network to the harness. - - - `endpoint_name`: the relation name this network belongs to - - `relation_id`: ID of the relation this network belongs to. If None, this will - be the default network for the relation. - - `network`: network data. - - `make_default`: Make this the default network for the endpoint. - Equivalent to calling this again with `relation_id==None`. - """ - if not PATCH_ACTIVE: - raise NetworkingError("module not initialized; " "run activate() first.") - assert _NETWORKS # type guard - - if _NETWORKS[endpoint_name].get(relation_id): - network_logger.warning( - f"Endpoint {endpoint_name} is already bound " - f"to a network for relation id {relation_id}." - f"Overwriting..." 
- ) - - _NETWORKS[endpoint_name][relation_id] = network - - if relation_id and make_default: - # make it default as well - _NETWORKS[endpoint_name][None] = network - - -def remove_network(endpoint_name: str, relation_id: Optional[int]): - """Remove a network from the harness.""" - if not PATCH_ACTIVE: - raise NetworkingError("module not initialized; " "run activate() first.") - assert _NETWORKS # type guard - - _NETWORKS[endpoint_name].pop(relation_id) - if not _NETWORKS[endpoint_name]: - del _NETWORKS[endpoint_name] - - -def Network( - private_address: str = "1.1.1.1", - mac_address: str = "", - hostname: str = "", - cidr: str = "", - interface_name: str = "", - egress_subnets=("1.1.1.2/32",), - ingress_addresses=("1.1.1.2",), -) -> _Network: - """Construct a network object.""" - return { - "bind-addresses": [ - { - "mac-address": mac_address, - "interface-name": interface_name, - "interfacename": interface_name, - "addresses": [ - {"hostname": hostname, "value": private_address, "cidr": cidr} - ], - } - ], - "bind-address": private_address, - "egress-subnets": list(egress_subnets), - "ingress-addresses": list(ingress_addresses), - } - - -_not_given = object() # None is meaningful, but JUJU_INFO is mutable - - -@contextmanager -def networking( - juju_info_network: Optional[_Network] = _not_given, # type: ignore - networks: Optional[Dict[Union[str, Relation], _Network]] = None, - make_default: bool = False, -): - """Context manager to activate/deactivate networking within a scope. - - Arguments: - - `juju_info_network`: network assigned to the implicit 'juju-info' endpoint. - - `networks`: mapping from endpoints (names, or relations) to networks. - - `make_default`: whether the networks passed as relations should also - be interpreted as default networks for the endpoint. - - Example usage: - >>> with networking(): - >>> assert charm.model.get_binding('juju-info').network.private_address - - >>> foo_relation = harness.model.get_relation('foo', 1) - >>> bar_relation = harness.model.get_relation('bar', 2) - >>> with networking(networks={ - ... foo_relation: Network(private_address='42.42.42.42')} - ... 'bar': Network(private_address='50.50.50.1')}, - ... make_default=True, - ... ): - >>> assert charm.model.get_binding(foo_relation).network.private_address - >>> assert charm.model.get_binding('foo').network.private_address - >>> assert charm.model.get_binding('bar').network.private_address - ... - >>> # this will raise an error! We only defined a default bar - >>> # network, not one specific to this relation ID. 
- >>> # assert charm.model.get_binding(bar_relation).network.private_address - - """ - global _NETWORKS - old = deepcopy(_NETWORKS) - patch_was_inactive = False - - if juju_info_network is _not_given: - juju_info_network = JUJU_INFO - - if not PATCH_ACTIVE: - patch_was_inactive = True - activate(juju_info_network or JUJU_INFO) - else: - assert _NETWORKS # type guard - - if juju_info_network: - _NETWORKS["juju-info"][None] = juju_info_network - - for binding, network in networks.items() if networks else (): - if isinstance(binding, str): - name = binding - bind_id = None - elif isinstance(binding, Relation): - name = binding.name - bind_id = binding.id - else: - raise TypeError(binding) - add_network(name, bind_id, network, make_default=make_default) - - yield - - _NETWORKS = old - if patch_was_inactive: - deactivate() - - -# from HARNESS_CTX v0 - -import typing -from typing import Callable, Protocol, Type - -from ops.charm import CharmBase, CharmEvents -from ops.framework import BoundEvent, Handle, EventBase -from ops.testing import Harness - - -class _HasOn(Protocol): - @property - def on(self) -> CharmEvents: - ... - - -def _DefaultEmitter(charm: CharmBase, harness: Harness): - return charm - - -class Emitter: - """Event emitter.""" - - def __init__(self, harness: Harness, emit: Callable[[], BoundEvent]): - self.harness = harness - self._emit = emit - self.event = None - self._emitted = False - - @property - def emitted(self): - """Has the event been emitted already?""" # noqa - return self._emitted - - def emit(self): - """Emit the event. - - Will get called automatically when HarnessCtx exits if you didn't call it already. - """ - assert not self._emitted, "already emitted; should not emit twice" - self.event = self._emit() - self._emitted = True - return self.event - - -class HarnessCtx: - """Harness-based context for emitting a single event. - - Example usage: - >>> class MyCharm(CharmBase): - >>> def __init__(self, framework: Framework, key: typing.Optional = None): - >>> super().__init__(framework, key) - >>> self.framework.observe(self.on.update_status, self._listen) - >>> self.framework.observe(self.framework.on.commit, self._listen) - >>> - >>> def _listen(self, e): - >>> self.event = e - >>> - >>> with HarnessCtx(MyCharm, "update-status") as h: - >>> event = h.emit() - >>> assert event.handle.kind == "update_status" - >>> - >>> assert h.harness.charm.event.handle.kind == "commit" - """ - - def __init__( - self, - charm: Type[CharmBase], - event_name: str, - emitter: Callable[[CharmBase, Harness], _HasOn] = _DefaultEmitter, - meta: Optional[CharmMeta] = None, - actions: Optional[CharmMeta] = None, - config: Optional[CharmMeta] = None, - event_args: Tuple[Any, ...] 
= (), - event_kwargs: Dict[str, Any] = None, - pre_begin_hook: Optional[Callable[[Harness], None]] = None - ): - self.charm_cls = charm - self.emitter = emitter - self.event_name = event_name.replace("-", "_") - self.event_args = event_args - self.event_kwargs = event_kwargs or {} - self.pre_begin_hook = pre_begin_hook - - def _to_yaml(obj): - if isinstance(obj, str): - return obj - elif not obj: - return None - return yaml.safe_dump(obj) - - self.harness_kwargs = { - 'meta': _to_yaml(meta), - 'actions': _to_yaml(actions), - 'config': _to_yaml(config) - } - - @staticmethod - def _inject(harness: Harness, obj): - if isinstance(obj, InjectRelation): - return harness.model.get_relation( - relation_name=obj.relation_name, - relation_id=obj.relation_id - ) - - return obj - - def _process_event_args(self, harness): - return map(partial(self._inject, harness), self.event_args) - - def _process_event_kwargs(self, harness): - kwargs = self.event_kwargs - return kwargs - - def __enter__(self): - self._harness = harness = Harness(self.charm_cls, - **self.harness_kwargs) - if self.pre_begin_hook: - logger.debug('running harness pre-begin hook') - self.pre_begin_hook(harness) - - harness.begin() - - emitter = self.emitter(harness.charm, harness) - events = getattr(emitter, "on") - event_source: BoundEvent = getattr(events, self.event_name) - - def _emit() -> BoundEvent: - # we don't call event_source.emit() - # because we want to grab the event - framework = event_source.emitter.framework - key = framework._next_event_key() # noqa - handle = Handle(event_source.emitter, event_source.event_kind, key) - - event_args = self._process_event_args(harness) - event_kwargs = self._process_event_kwargs(harness) - - event = event_source.event_type(handle, *event_args, **event_kwargs) - event.framework = framework - framework._emit(event) # type: ignore # noqa - return typing.cast(BoundEvent, event) - - self._emitter = bound_ctx = Emitter(harness, _emit) - return bound_ctx - - def __exit__(self, exc_type, exc_val, exc_tb): - if not self._emitter.emitted: - self._emitter.emit() - self._harness.framework.on.commit.emit() # type: ignore - - -# from show-relation! - -@dataclass -class DCBase: - def replace(self, *args, **kwargs): - return dataclasses.replace(self, *args, **kwargs) - - -@dataclass -class RelationMeta(DCBase): - endpoint: str - interface: str - remote_app_name: str - relation_id: int - - # local limit - limit: int = 1 - - remote_unit_ids: Tuple[int, ...] = (0,) - # scale of the remote application; number of units, leader ID? - # TODO figure out if this is relevant - scale: int = 1 - leader_id: int = 0 - - @classmethod - def from_dict(cls, obj): - return cls(**obj) - - -@dataclass -class RelationSpec(DCBase): - meta: RelationMeta - application_data: dict = dataclasses.field(default_factory=dict) - units_data: Dict[int, dict] = dataclasses.field(default_factory=dict) - - @classmethod - def from_dict(cls, obj): - meta = RelationMeta.from_dict(obj.pop('meta')) - return cls(meta=meta, **obj) - - def copy(self): - return dataclasses.replace() - - -# ACTUAL LIBRARY CODE. Dependencies above. 
- -logger = logging.getLogger('evt-sequences') - -ATTACH_ALL_STORAGES = "ATTACH_ALL_STORAGES" -CREATE_ALL_RELATIONS = "CREATE_ALL_RELATIONS" -BREAK_ALL_RELATIONS = "BREAK_ALL_RELATIONS" -DETACH_ALL_STORAGES = "DETACH_ALL_STORAGES" -META_EVENTS = { - "ATTACH_ALL_STORAGES", - "CREATE_ALL_RELATIONS", - "BREAK_ALL_RELATIONS", - "DETACH_ALL_STORAGES", -} - - -@dataclass -class CharmSpec: - """Charm spec.""" - charm_type: CharmType - meta: Optional[CharmMeta] = None - actions: Optional[CharmMeta] = None - config: Optional[CharmMeta] = None - - @staticmethod - def cast(obj: Union['CharmSpec', CharmType, Type[CharmBase]]): - if isinstance(obj, type) and issubclass(obj, CharmBase): - return CharmSpec(charm_type=obj) - elif isinstance(obj, CharmSpec): - return obj - else: - raise ValueError(f'cannot convert {obj} to CharmSpec') - - -@dataclass -class _Event(DCBase): - name: str - args: Tuple[Any] = () - kwargs: Dict[str, Any] = dataclasses.field(default_factory=dict) - - @property - def is_meta(self): - return self.name in META_EVENTS - - @classmethod - def from_dict(cls, obj): - return cls(**obj) - - def as_scenario(self, context: 'Context'): - """Utility to get to a single-event Scenario from a single event instance.""" - return Scenario.from_scenes(Scene(context=context, event=self)) - - def play(self, context: 'Context', - charm_spec: CharmSpec, - assertions: Sequence[AssertionType] = (), - ) -> PlayResult: - """Utility to play this as a single scene.""" - return self.as_scenario( - context - ).bind( - charm_spec=charm_spec, - ).play_until_complete( - assertions=assertions) - - -def _derive_args(event_name: str): - args = [] - terms = {'-relation-changed', '-relation-broken', - '-relation-joined', '-relation-departed', - '-relation-created'} - - for term in terms: - # fixme: we can't disambiguate between relation IDs. - if event_name.endswith(term): - args.append(InjectRelation(relation_name=event_name[:-len(term)])) - - return tuple(args) - - -def Event(name: str, append_args: Tuple[Any] = (), **kwargs) -> _Event: - """This routine will attempt to generate event args for you, based on the event name.""" - return _Event(name=name, args=_derive_args(name) + append_args, kwargs=kwargs) - - -@dataclass -class NetworkSpec(DCBase): - name: str - bind_id: int - network: _Network - is_default: bool = False - - @classmethod - def from_dict(cls, obj): - return cls(**obj) - - -@dataclass -class Model(DCBase): - name: str = 'foo' - uuid: str = str(uuid4()) - - -@dataclass -class Context(DCBase): - config: Dict[str, Union[str, int, float, bool]] = None - relations: Tuple[RelationSpec] = () - networks: Tuple[NetworkSpec] = () - leader: bool = False - model: Model = Model() - - # todo: add pebble stuff, unit/app status, etc... - # containers - # status - # actions? 
- # juju topology - - @classmethod - def from_dict(cls, obj): - return cls( - config=obj['config'], - relations=tuple(RelationSpec.from_dict(raw_ard) for raw_ard in obj['relations']), - networks=tuple(NetworkSpec.from_dict(raw_ns) for raw_ns in obj['networks']), - leader=obj['leader'] - ) - - def as_scenario(self, event: _Event): - """Utility to get to a single-event Scenario from a single context instance.""" - return Scenario.from_scenes(Scene(context=self, event=event)) - - def play(self, event: _Event, - charm_spec: CharmSpec, - assertions: Sequence[AssertionType] = ()) -> PlayResult: - """Utility to play this as a single scene.""" - return self.as_scenario( - event - ).bind( - charm_spec=charm_spec, - ).play_until_complete( - assertions=assertions) - - -null_context = Context() - - -@dataclass -class Scene(DCBase): - event: _Event - context: Context = None - name: str = "" - - def __iter__(self): - yield from [self.context, self.event] - - @classmethod - def from_dict(cls, obj): - evt = obj['event'] - return cls( - event=_Event(evt) if isinstance(evt, str) else _Event.from_dict(evt), - context=Context.from_dict(obj['context']) if obj['context'] is not None else None, - name=obj['name'], - ) - - -class _Builtins: - @staticmethod - def startup(leader=True): - return Scenario.from_events( - ( - ATTACH_ALL_STORAGES, - 'start', - CREATE_ALL_RELATIONS, - 'leader-elected' if leader else 'leader-settings-changed', - 'config-changed', - 'install', - ) - ) - - @staticmethod - def teardown(): - return Scenario.from_events( - ( - BREAK_ALL_RELATIONS, - DETACH_ALL_STORAGES, - 'stop', - 'remove' - ) - ) - - -class Playbook: - def __init__(self, scenes: Iterable[Scene]): - self._scenes = list(scenes) - self._cursor = 0 - - def __bool__(self): - return bool(self._scenes) - - @property - def is_done(self): - return self._cursor < (len(self._scenes) - 1) - - def add(self, scene: Scene): - self._scenes.append(scene) - - def next(self): - self.scroll(1) - return self._scenes[self._cursor] - - def scroll(self, n): - if not 0 <= self._cursor + n <= len(self._scenes): - raise RuntimeError(f"Cursor out of bounds: can't scroll ({self}) by {n}.") - self._cursor += n - - def restart(self): - self._cursor = 0 - - def __repr__(self): - return f"" - - def __iter__(self): - yield from self._scenes - - def __next__(self): - return self.next() - - def dump(self) -> str: - """Serialize.""" - obj = {'scenes': [dataclasses.asdict(scene) for scene in self._scenes]} - return json.dumps(obj, indent=2) - - @staticmethod - def load(s: str) -> 'Playbook': - obj = json.loads(s) - scenes = tuple(Scene.from_dict(raw_scene) for raw_scene in obj['scenes']) - return Playbook(scenes=scenes) - - -class _UnboundScenario: - def __init__(self, - playbook: Playbook = Playbook(()), - ): - self._playbook = playbook - - @property - def playbook(self): - return self._playbook - - def __call__(self, charm_spec: Union[CharmSpec, CharmType]): - return Scenario(charm_spec=CharmSpec.cast(charm_spec), - playbook=self.playbook) - - bind = __call__ # alias - - -@dataclass -class Inject: - """Base class for injectors: special placeholders used to tell harness_ctx - to inject instances that can't be retrieved in advance in event args or kwargs. 
- """ - pass - - -@dataclass -class InjectRelation(Inject): - relation_name: str - relation_id: Optional[int] = None - - -class Scenario: - builtins = _Builtins() - - def __init__(self, charm_spec: CharmSpec, - playbook: Playbook = Playbook(())): - - self._playbook = playbook - self._charm_spec = CharmSpec.cast(charm_spec) - - @staticmethod - def from_scenes( - playbook: Union[Scene, Iterable[Scene]] - ) -> _UnboundScenario: - _scenes = (playbook,) if isinstance(playbook, Scene) else tuple(playbook) - for i, scene in enumerate(_scenes): - if not scene.name: - scene.name = f"" - return _UnboundScenario(playbook=Playbook(_scenes)) - - @staticmethod - def from_events( - events: typing.Sequence[Union[str, _Event]] - ) -> _UnboundScenario: - - def _to_event(obj): - if isinstance(obj, str): - return _Event(obj) - elif isinstance(obj, _Event): - return obj - else: - raise TypeError(obj) - - return Scenario.from_scenes(map(Scene, map(_to_event, events))) - - @property - def playbook(self) -> Playbook: - return self._playbook - - def __enter__(self): - self._entered = True - activate() - return self - - def __exit__(self, *exc_info): - self._playbook.restart() - deactivate() - self._entered = False - if exc_info: - return False - return True - - @staticmethod - def _pre_setup_context(harness: Harness, context: Context): - # Harness initialization that needs to be done pre-begin() - - # juju topology: - harness.set_model_info(name=context.model.name, - uuid=context.model.uuid) - - @staticmethod - def _setup_context(harness: Harness, context: Context): - harness.disable_hooks() - be: ops.testing._TestingModelBackend = harness._backend # noqa - - # relation data - for relation in context.relations: - remote_app_name = relation.meta.remote_app_name - r_id = harness.add_relation(relation.meta.endpoint, remote_app_name) - if remote_app_name != harness.charm.app.name: - if relation.application_data: - harness.update_relation_data(r_id, remote_app_name, - relation.application_data) - for unit_n, unit_data in relation.units_data.items(): - unit_name = f"{remote_app_name}/{unit_n}" - harness.add_relation_unit(r_id, unit_name) - harness.update_relation_data(r_id, unit_name, unit_data) - else: - if relation.application_data: - harness.update_relation_data(r_id, harness.charm.app.name, - relation.application_data) - if relation.units_data: - if not tuple(relation.units_data) == (0,): - raise RuntimeError('Only one local unit is supported.') - harness.update_relation_data(r_id, harness.charm.unit.name, - relation.units_data[0]) - # leadership: - harness.set_leader(context.leader) - - # networking - for network in context.networks: - add_network(endpoint_name=network.name, - relation_id=network.bind_id, - network=network.network, - make_default=network.is_default) - harness.enable_hooks() - - @staticmethod - def _cleanup_context(harness: Harness, context: Context): - # Harness will be reinitialized, so nothing to clean up there; - # however: - for network in context.networks: - remove_network(endpoint_name=network.name, - relation_id=network.bind_id) - - def _play_meta(self, event: _Event, - context: Context = None, - add_to_playbook: bool = False): - # decompose the meta event - events = [] - - if event.name == ATTACH_ALL_STORAGES: - logger.warning(f"meta-event {event.name} not supported yet") - return - - elif event.name == DETACH_ALL_STORAGES: - logger.warning(f"meta-event {event.name} not supported yet") - return - - elif event.name == CREATE_ALL_RELATIONS: - if context: - for relation in context.relations: - 
# RELATION_OBJ is to indicate to the harness_ctx that - # it should retrieve the - evt = _Event(f"{relation.meta.endpoint}-relation-created", - args=(InjectRelation(relation.meta.endpoint, - relation.meta.relation_id),)) - events.append(evt) - - elif event.name == BREAK_ALL_RELATIONS: - if context: - for relation in context.relations: - evt = _Event(f"{relation.meta.endpoint}-relation-broken", - args=(InjectRelation(relation.meta.endpoint, - relation.meta.relation_id),)) - events.append(evt) - # todo should we ensure there's no relation data in this context? - - else: - raise RuntimeError(f'unknown meta-event {event.name}') - - logger.debug(f"decomposed meta {event.name} into {events}") - last = None - for event in events: - last = self.play(event, context, add_to_playbook=add_to_playbook) - return last - - def play(self, evt: Union[_Event, str], - context: Context = None, - add_to_playbook: bool = False) -> PlayResult: - if not self._entered: - raise RuntimeError("Scenario.play() should be only called " - "within the Scenario's context.") - event = _Event(evt) if isinstance(evt, str) else evt - - if event.is_meta: - return self._play_meta(event, context, - add_to_playbook=add_to_playbook) - - charm_spec = self._charm_spec - - pre_begin_hook = None - if context: - # some context needs to be set up before harness.begin() is called. - pre_begin_hook = partial(self._pre_setup_context, context=context) - - with HarnessCtx(charm_spec.charm_type, - event_name=event.name, - event_args=event.args, - event_kwargs=event.kwargs, - meta=charm_spec.meta, - actions=charm_spec.actions, - config=charm_spec.config, - pre_begin_hook=pre_begin_hook) as ctx: - if context: - self._setup_context(ctx.harness, context) - - ops_evt_obj: BoundEvent = ctx.emit() - - # todo verify that if state was mutated, it was mutated - # in a way that makes sense: - # e.g. - charm cannot modify leadership status, etc... - if context: - self._cleanup_context(ctx.harness, context) - - if add_to_playbook: - # so we can later export it - self._playbook.add(Scene(context=context, event=event)) - - # TODO: gather new context or Delta, return that. 
- return ops_evt_obj, context, ctx - - def play_next(self): - next_scene: Scene = self._playbook.next() - self.play(*next_scene) - - def play_until_complete(self, - assertions: Union[AssertionType, - Iterable[AssertionType]] = ()): - if not self._playbook: - raise RuntimeError('playbook is empty') - - with self: - for context, event in self._playbook: - ctx = self.play(evt=event, context=context) - if assertions: - self._check_assertions(ctx, assertions) - return ctx - - @staticmethod - def _check_assertions(ctx: PlayResult, - assertions: Union[AssertionType, - Iterable[AssertionType]]): - if callable(assertions): - assertions = [assertions] - - for assertion in assertions: - ret_val = assertion(*ctx) - if ret_val is False: - raise ValueError(f"Assertion {assertion} returned False") - diff --git a/lib/charms/prometheus_k8s/v0/prometheus_remote_write.py b/lib/charms/prometheus_k8s/v0/prometheus_remote_write.py index ae2389d..f7c3ab7 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_remote_write.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_remote_write.py @@ -45,7 +45,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 14 +LIBPATCH = 15 logger = logging.getLogger(__name__) @@ -407,7 +407,7 @@ def _validate_relation_by_interface_and_direction( if relation_name not in charm.meta.relations: raise RelationNotFoundError(relation_name) - relation = charm.meta.relations[relation_name] # type: RelationMeta + relation: RelationMeta = charm.meta.relations[relation_name] actual_relation_interface = relation.interface_name if actual_relation_interface != expected_relation_interface: @@ -609,6 +609,7 @@ def __init__( charm: The charm object that instantiated this class. relation_name: Name of the relation with the `prometheus_remote_write` interface as defined in metadata.yaml. + alert_rules_path: Path of the directory containing the alert rules. Raises: RelationNotFoundError: If there is no relation in the charm's metadata.yaml @@ -968,7 +969,10 @@ def alerts(self) -> dict: _, errmsg = self._tool.validate_alert_rules(alert_rules) if errmsg: - relation.data[self._charm.app]["event"] = json.dumps({"errors": errmsg}) + if self._charm.unit.is_leader(): + data = json.loads(relation.data[self._charm.app].get("event", "{}")) + data["errors"] = errmsg + relation.data[self._charm.app]["event"] = json.dumps(data) continue alerts[identifier] = alert_rules diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index bd1dd6f..e4297aa 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -18,13 +18,6 @@ Source code can be found on GitHub at: https://github.com/canonical/prometheus-k8s-operator/tree/main/lib/charms/prometheus_k8s -## Dependencies - -Using this library requires you to fetch the juju_topology library from -[observability-libs](https://charmhub.io/observability-libs/libraries/juju_topology). 
- -`charmcraft fetch-lib charms.observability_libs.v0.juju_topology` - ## Provider Library Usage This Prometheus charm interacts with its scrape targets using its @@ -343,12 +336,11 @@ def _on_scrape_targets_changed(self, event): from collections import defaultdict from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from urllib.error import HTTPError, URLError from urllib.parse import urlparse -from urllib.request import urlopen import yaml -from charms.observability_libs.v0.juju_topology import JujuTopology +from cosl import JujuTopology +from cosl.rules import AlertRules from ops.charm import CharmBase, RelationRole from ops.framework import ( BoundEvent, @@ -370,7 +362,9 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 35 +LIBPATCH = 42 + +PYDEPS = ["cosl"] logger = logging.getLogger(__name__) @@ -391,6 +385,7 @@ def _on_scrape_targets_changed(self, event): "scheme", "basic_auth", "tls_config", + "authorization", } DEFAULT_JOB = { "metrics_path": "/metrics", @@ -601,15 +596,22 @@ def render_alertmanager_static_configs(alertmanagers: List[str]): # Create a mapping from paths to netlocs # Group alertmanager targets into a dictionary of lists: # {path: [netloc1, netloc2]} - paths = defaultdict(list) # type: Dict[str, List[str]] + paths = defaultdict(list) # type: Dict[Tuple[str, str], List[str]] for parsed in map(urlparse, sanitized): path = parsed.path or "/" - paths[path].append(parsed.netloc) + paths[(parsed.scheme, path)].append(parsed.netloc) return { "alertmanagers": [ - {"path_prefix": path_prefix, "static_configs": [{"targets": netlocs}]} - for path_prefix, netlocs in paths.items() + { + # For https we still do not render a `tls_config` section because + # certs are expected to be made available by the charm via the + # `update-ca-certificates` mechanism. + "scheme": scheme, + "path_prefix": path_prefix, + "static_configs": [{"targets": netlocs}], + } + for (scheme, path_prefix), netlocs in paths.items() ] } @@ -715,13 +717,12 @@ def _type_convert_stored(obj): """Convert Stored* to their appropriate types, recursively.""" if isinstance(obj, StoredList): return list(map(_type_convert_stored, obj)) - elif isinstance(obj, StoredDict): + if isinstance(obj, StoredDict): rdict = {} # type: Dict[Any, Any] for k in obj.keys(): rdict[k] = _type_convert_stored(obj[k]) return rdict - else: - return obj + return obj def _validate_relation_by_interface_and_direction( @@ -831,206 +832,6 @@ def _is_single_alert_rule_format(rules_dict: dict) -> bool: return set(rules_dict) >= {"alert", "expr"} -class AlertRules: - """Utility class for amalgamating prometheus alert rule files and injecting juju topology. - - An `AlertRules` object supports aggregating alert rules from files and directories in both - official and single rule file formats using the `add_path()` method. All the alert rules - read are annotated with Juju topology labels and amalgamated into a single data structure - in the form of a Python dictionary using the `as_dict()` method. Such a dictionary can be - easily dumped into JSON format and exchanged over relation data. The dictionary can also - be dumped into YAML format and written directly into an alert rules file that is read by - Prometheus. 
Note that multiple `AlertRules` objects must not be written into the same file, - since Prometheus allows only a single list of alert rule groups per alert rules file. - - The official Prometheus format is a YAML file conforming to the Prometheus documentation - (https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/). - The custom single rule format is a subsection of the official YAML, having a single alert - rule, effectively "one alert per file". - """ - - # This class uses the following terminology for the various parts of a rule file: - # - alert rules file: the entire groups[] yaml, including the "groups:" key. - # - alert groups (plural): the list of groups[] (a list, i.e. no "groups:" key) - it is a list - # of dictionaries that have the "name" and "rules" keys. - # - alert group (singular): a single dictionary that has the "name" and "rules" keys. - # - alert rules (plural): all the alerts in a given alert group - a list of dictionaries with - # the "alert" and "expr" keys. - # - alert rule (singular): a single dictionary that has the "alert" and "expr" keys. - - def __init__(self, topology: Optional[JujuTopology] = None): - """Build and alert rule object. - - Args: - topology: an optional `JujuTopology` instance that is used to annotate all alert rules. - """ - self.topology = topology - self.tool = CosTool(None) - self.alert_groups = [] # type: List[dict] - - def _from_file(self, root_path: Path, file_path: Path) -> List[dict]: - """Read a rules file from path, injecting juju topology. - - Args: - root_path: full path to the root rules folder (used only for generating group name) - file_path: full path to a *.rule file. - - Returns: - A list of dictionaries representing the rules file, if file is valid (the structure is - formed by `yaml.safe_load` of the file); an empty list otherwise. - """ - with file_path.open() as rf: - # Load a list of rules from file then add labels and filters - try: - rule_file = yaml.safe_load(rf) - - except Exception as e: - logger.error("Failed to read alert rules from %s: %s", file_path.name, e) - return [] - - if not rule_file: - logger.warning("Empty rules file: %s", file_path.name) - return [] - if not isinstance(rule_file, dict): - logger.error("Invalid rules file (must be a dict): %s", file_path.name) - return [] - if _is_official_alert_rule_format(rule_file): - alert_groups = rule_file["groups"] - elif _is_single_alert_rule_format(rule_file): - # convert to list of alert groups - # group name is made up from the file name - alert_groups = [{"name": file_path.stem, "rules": [rule_file]}] - else: - # invalid/unsupported - logger.error("Invalid rules file: %s", file_path.name) - return [] - - # update rules with additional metadata - for alert_group in alert_groups: - # update group name with topology and sub-path - alert_group["name"] = self._group_name( - str(root_path), - str(file_path), - alert_group["name"], - ) - - # add "juju_" topology labels - for alert_rule in alert_group["rules"]: - if "labels" not in alert_rule: - alert_rule["labels"] = {} - - if self.topology: - alert_rule["labels"].update(self.topology.label_matcher_dict) - # insert juju topology filters into a prometheus alert rule - alert_rule["expr"] = self.tool.inject_label_matchers( - re.sub(r"%%juju_topology%%,?", "", alert_rule["expr"]), - self.topology.label_matcher_dict, - ) - - return alert_groups - - def _group_name(self, root_path: str, file_path: str, group_name: str) -> str: - """Generate group name from path and topology. 
- - The group name is made up of the relative path between the root dir_path, the file path, - and topology identifier. - - Args: - root_path: path to the root rules dir. - file_path: path to rule file. - group_name: original group name to keep as part of the new augmented group name - - Returns: - New group name, augmented by juju topology and relative path. - """ - rel_path = os.path.relpath(os.path.dirname(file_path), root_path) - rel_path = "" if rel_path == "." else rel_path.replace(os.path.sep, "_") - - # Generate group name: - # - name, from juju topology - # - suffix, from the relative path of the rule file; - group_name_parts = [self.topology.identifier] if self.topology else [] - group_name_parts.extend([rel_path, group_name, "alerts"]) - # filter to remove empty strings - return "_".join(filter(None, group_name_parts)) - - @classmethod - def _multi_suffix_glob( - cls, dir_path: Path, suffixes: List[str], recursive: bool = True - ) -> list: - """Helper function for getting all files in a directory that have a matching suffix. - - Args: - dir_path: path to the directory to glob from. - suffixes: list of suffixes to include in the glob (items should begin with a period). - recursive: a flag indicating whether a glob is recursive (nested) or not. - - Returns: - List of files in `dir_path` that have one of the suffixes specified in `suffixes`. - """ - all_files_in_dir = dir_path.glob("**/*" if recursive else "*") - return list(filter(lambda f: f.is_file() and f.suffix in suffixes, all_files_in_dir)) - - def _from_dir(self, dir_path: Path, recursive: bool) -> List[dict]: - """Read all rule files in a directory. - - All rules from files for the same directory are loaded into a single - group. The generated name of this group includes juju topology. - By default, only the top directory is scanned; for nested scanning, pass `recursive=True`. - - Args: - dir_path: directory containing *.rule files (alert rules without groups). - recursive: flag indicating whether to scan for rule files recursively. - - Returns: - a list of dictionaries representing prometheus alert rule groups, each dictionary - representing an alert group (structure determined by `yaml.safe_load`). - """ - alert_groups = [] # type: List[dict] - - # Gather all alerts into a list of groups - for file_path in self._multi_suffix_glob( - dir_path, [".rule", ".rules", ".yml", ".yaml"], recursive - ): - alert_groups_from_file = self._from_file(dir_path, file_path) - if alert_groups_from_file: - logger.debug("Reading alert rule from %s", file_path) - alert_groups.extend(alert_groups_from_file) - - return alert_groups - - def add_path(self, path: str, *, recursive: bool = False) -> None: - """Add rules from a dir path. - - All rules from files are aggregated into a data structure representing a single rule file. - All group names are augmented with juju topology. - - Args: - path: either a rules file or a dir of rules files. - recursive: whether to read files recursively or not (no impact if `path` is a file). - - Returns: - True if path was added else False. - """ - path = Path(path) # type: Path - if path.is_dir(): - self.alert_groups.extend(self._from_dir(path, recursive)) - elif path.is_file(): - self.alert_groups.extend(self._from_file(path.parent, path)) - else: - logger.debug("Alert rules path does not exist: %s", path) - - def as_dict(self) -> dict: - """Return standard alert rules file in dict representation. - - Returns: - a dictionary containing a single list of alert rule groups. 
- The list of alert rule groups is provided as value of the - "groups" dictionary key. - """ - return {"groups": self.alert_groups} if self.alert_groups else {} - - class TargetsChangedEvent(EventBase): """Event emitted when Prometheus scrape targets change.""" @@ -1321,7 +1122,7 @@ def _inject_alert_expr_labels(self, rules: Dict[str, Any]) -> Dict[str, Any]: # Inject topology and put it back in the list rule["expr"] = self._tool.inject_label_matchers( re.sub(r"%%juju_topology%%,?", "", rule["expr"]), - topology.label_matcher_dict, + topology.alert_expression_dict, ) except KeyError: # Some required JujuTopology key is missing. Just move on. @@ -1353,29 +1154,31 @@ def _static_scrape_config(self, relation) -> list: if not relation.units: return [] - scrape_jobs = json.loads(relation.data[relation.app].get("scrape_jobs", "[]")) + scrape_configs = json.loads(relation.data[relation.app].get("scrape_jobs", "[]")) - if not scrape_jobs: + if not scrape_configs: return [] scrape_metadata = json.loads(relation.data[relation.app].get("scrape_metadata", "{}")) if not scrape_metadata: - return scrape_jobs + return scrape_configs topology = JujuTopology.from_dict(scrape_metadata) job_name_prefix = "juju_{}_prometheus_scrape".format(topology.identifier) - scrape_jobs = PrometheusConfig.prefix_job_names(scrape_jobs, job_name_prefix) - scrape_jobs = PrometheusConfig.sanitize_scrape_configs(scrape_jobs) + scrape_configs = PrometheusConfig.prefix_job_names(scrape_configs, job_name_prefix) + scrape_configs = PrometheusConfig.sanitize_scrape_configs(scrape_configs) hosts = self._relation_hosts(relation) - scrape_jobs = PrometheusConfig.expand_wildcard_targets_into_individual_jobs( - scrape_jobs, hosts, topology + scrape_configs = PrometheusConfig.expand_wildcard_targets_into_individual_jobs( + scrape_configs, hosts, topology ) - return scrape_jobs + # For https scrape targets we still do not render a `tls_config` section because certs + # are expected to be made available by the charm via the `update-ca-certificates` mechanism. + return scrape_configs def _relation_hosts(self, relation: Relation) -> Dict[str, Tuple[str, str]]: """Returns a mapping from unit names to (address, path) tuples, for the given relation.""" @@ -1440,7 +1243,7 @@ def _dedupe_job_names(jobs: List[dict]): job["job_name"] = "{}_{}".format(job["job_name"], hashed) new_jobs = [] for key in jobs_dict: - new_jobs.extend([i for i in jobs_dict[key]]) + new_jobs.extend(list(jobs_dict[key])) # Deduplicate jobs which are equal # Again this in O(n^2) but it should be okay @@ -1722,7 +1525,7 @@ def set_scrape_job_spec(self, _=None): if not self._charm.unit.is_leader(): return - alert_rules = AlertRules(topology=self.topology) + alert_rules = AlertRules(query_type="promql", topology=self.topology) alert_rules.add_path(self._alert_rules_path, recursive=True) alert_rules_as_dict = alert_rules.as_dict() @@ -1793,11 +1596,10 @@ def _scrape_jobs(self) -> list: A list of dictionaries, where each dictionary specifies a single scrape job for Prometheus. 
""" - jobs = self._jobs if self._jobs else [DEFAULT_JOB] + jobs = self._jobs or [] if callable(self._lookaside_jobs): - return jobs + PrometheusConfig.sanitize_scrape_configs(self._lookaside_jobs()) - else: - return jobs + jobs.extend(PrometheusConfig.sanitize_scrape_configs(self._lookaside_jobs())) + return jobs or [DEFAULT_JOB] @property def _scrape_metadata(self) -> dict: @@ -1870,7 +1672,7 @@ def _update_relation_data(self, _): if not self._charm.unit.is_leader(): return - alert_rules = AlertRules() + alert_rules = AlertRules(query_type="promql") alert_rules.add_path(self.dir_path, recursive=self._recursive) alert_rules_as_dict = alert_rules.as_dict() @@ -2078,6 +1880,7 @@ def set_target_job_data(self, targets: dict, app_name: str, **kwargs) -> None: Args: targets: a `dict` containing target information app_name: a `str` identifying the application + kwargs: a `dict` of the extra arguments passed to the function """ if not self._charm.unit.is_leader(): return @@ -2203,6 +2006,7 @@ def _static_scrape_job(self, targets, application_name, **kwargs) -> dict: "port". application_name: a string name of the application for which this static scrape job is being constructed. + kwargs: a `dict` of the extra arguments passed to the function Returns: A dictionary corresponding to a Prometheus static scrape @@ -2248,16 +2052,7 @@ def _static_config_extra_labels(self, target: Dict[str, str]) -> Dict[str, str]: logger.debug("Could not perform DNS lookup for %s", target["hostname"]) dns_name = target["hostname"] extra_info["dns_name"] = dns_name - label_re = re.compile(r'(?P