From 2e13fb477040274bfee7e6a72addd59d4b05052c Mon Sep 17 00:00:00 2001 From: Luca Bello Date: Fri, 1 Dec 2023 11:18:28 +0100 Subject: [PATCH 1/4] add nginx workload and some config --- metadata.yaml | 16 +- requirements.txt | 2 + src/charm.py | 17 ++ src/nginx.py | 330 +++++++++++++++++++++++++++++++++++++++ tests/unit/test_charm.py | 2 +- 5 files changed, 358 insertions(+), 9 deletions(-) create mode 100644 src/nginx.py diff --git a/metadata.yaml b/metadata.yaml index 424cda4..926d431 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -9,19 +9,19 @@ summary: Mimir coordinator description: | Mimir coordinator. -#containers: -# nginx: -# resource: nginx-image +containers: + nginx: + resource: nginx-image storage: data: type: filesystem -#resources: -# nginx-image: -# type: oci-image -# description: OCI image for nginx -# upstream-source: ubuntu/nginx:1.18-22.04_beta +resources: + nginx-image: + type: oci-image + description: OCI image for nginx + upstream-source: ubuntu/nginx:1.18-22.04_beta # agent-image: # type: oci-image # upstream-source: ghcr.io/canonical/grafana-agent:latest diff --git a/requirements.txt b/requirements.txt index f529938..5b56e23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ ops pydantic +# crossplane is a package from nginxinc to interact with the Nginx config +crossplane diff --git a/src/charm.py b/src/charm.py index 94588d5..70adf2b 100755 --- a/src/charm.py +++ b/src/charm.py @@ -19,6 +19,7 @@ PrometheusRemoteWriteConsumer, ) from mimir_coordinator import MimirCoordinator +from nginx import Nginx from ops.charm import CharmBase, CollectStatusEvent from ops.main import main from ops.model import ActiveStatus, BlockedStatus, Relation @@ -32,6 +33,10 @@ class MimirCoordinatorK8SOperatorCharm(CharmBase): def __init__(self, *args): super().__init__(*args) + + self._nginx_container = self.unit.get_container("nginx") + self._nginx_config_path = "/etc/nginx/nginx.conf" + self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.collect_unit_status, self._on_collect_status) @@ -42,6 +47,12 @@ def __init__(self, *args): self.cluster_provider = MimirClusterProvider(self) self.coordinator = MimirCoordinator(cluster_provider=self.cluster_provider) + self.nginx = Nginx() + self.framework.observe( + self.on.nginx_pebble_ready, # pyright: ignore + self._on_nginx_pebble_ready, + ) + self.framework.observe( self.on.mimir_cluster_relation_changed, # pyright: ignore self._on_mimir_cluster_changed, @@ -123,6 +134,12 @@ def _on_loki_relation_changed(self, _): # TODO Update rules relation with the new list of Loki push-api endpoints pass + def _on_nginx_pebble_ready(self, _event) -> None: + self._nginx_container.push(self.nginx.config_path, self.nginx.config, make_dirs=True) + + self._nginx_container.add_layer("nginx", self.nginx.layer, combine=True) + self._nginx_container.autostart() + if __name__ == "__main__": # pragma: nocover main(MimirCoordinatorK8SOperatorCharm) diff --git a/src/nginx.py b/src/nginx.py new file mode 100644 index 0000000..6fd2041 --- /dev/null +++ b/src/nginx.py @@ -0,0 +1,330 @@ +# Copyright 2023 Canonical +# See LICENSE file for licensing details. 
+"""Nginx workload.""" + +import logging + +import crossplane +from ops.pebble import Layer + +logger = logging.getLogger(__name__) + + +class Nginx: + """Helper class to manage the nginx workload.""" + + config_path = "/etc/nginx/nginx.conf" + + def __init__(self, *args): + super().__init__(*args) + + @property + def config(self) -> str: + """Build and return the Nginx configuration.""" + log_level = "error" + auth_enabled = False + addresses = { + "FIXME": "unit.app-endpoints.model.svc.cluster.local", + "distributor": "worker.worker-endpoints.cos.svc.cluster.local", + "alertmanager": "worker.worker-endpoints.cos.svc.cluster.local", + "ruler": "worker.worker-endpoints.cos.svc.cluster.local", + "query_frontend": "worker.worker-endpoints.cos.svc.cluster.local", + "compactor": "worker.worker-endpoints.cos.svc.cluster.local", + } # FIXME example, get it from somewhere + + def log_verbose(verbose): + if verbose: + return [{"directive": "access_log", "args": ["/dev/stderr", "main"]}] + return [ + { + "directive": "map", + "args": ["$status", "$loggable"], + "block": [ + {"directive": "~^[23]", "args": ["0"]}, + {"directive": "default", "args": ["1"]}, + ], + }, + {"directive": "access_log", "args": ["/dev/stderr"]}, + ] + + def resolver(custom_resolver): + if custom_resolver: + return {"directive": "resolver", "args": [custom_resolver]} + return {} # return the CoreDNS cluster local address + + def basic_auth(enabled): + if enabled: + return [ + {"directive": "auth_basic", "args": ['"Mimir"']}, + { + "directive": "auth_basic_user_file", + "args": ["/etc/nginx/secrets/.htpasswd"], + }, + ] + return [] + + # build the complete configuration + full_config = [ + {"directive": "worker_processes", "args": ["5"]}, + {"directive": "error_log", "args": ["/dev/stderr", log_level]}, + {"directive": "pid", "args": ["/tmp/nginx.pid"]}, + {"directive": "worker_rlimit_nofile", "args": ["8192"]}, + { + "directive": "events", + "args": [], + "block": [{"directive": "worker_connections", "args": ["4096"]}], + }, + { + "directive": "http", + "args": [], + "block": [ + # temp paths + {"directive": "client_body_temp_path", "args": ["/tmp/client_temp"]}, + {"directive": "proxy_temp_path", "args": ["/tmp/proxy_temp_path"]}, + {"directive": "fastcgi_temp_path", "args": ["/tmp/fastcgi_temp"]}, + {"directive": "uwsgi_temp_path", "args": ["/tmp/uwsgi_temp"]}, + {"directive": "scgi_temp_path", "args": ["/tmp/scgi_temp"]}, + # logging + {"directive": "default_type", "args": ["application/octet-stream"]}, + { + "directive": "log_format", + "args": [ + 'main \'$remote_addr - $remote_user [$time_local] $status "$request" $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for";' + ], + }, + *log_verbose(verbose=False), + # mimir-related + {"directive": "sendfile", "args": ["on"]}, + {"directive": "tcp_nopush", "args": ["on"]}, + resolver(custom_resolver=None), # empty for now, check if it's necessary + # TODO: add custom http block for the user to config? 
+ { + "directive": "map", + "args": ["$http_x_scope_orgid", "$ensured_x_scope_orgid"], + "block": [ + {"directive": "default", "args": ["$http_x_scope_orgid"]}, + {"directive": "", "args": ["FIXMEnoAuthTenant?"]}, # FIXME + ], + }, + {"directive": "proxy_read_timeout", "args": ["300"]}, + # server block + { + "directive": "server", + "args": [], + "block": [ + {"directive": "listen", "args": ["8080"]}, + {"directive": "listen", "args": ["[::]:8080"]}, + *basic_auth(auth_enabled), + { + "directive": "location", + "args": ["=", "/"], + "block": [ + {"directive": "return", "args": ["200", "'OK'"]}, + {"directive": "auth_basic", "args": ["off"]}, + ], + }, + { + "directive": "proxy_set_header", + "args": ["X-Scope-OrgID", "$ensured_x_scope_orgid"], + }, + # Distributor endpoints + { + "directive": "location", + "args": ["/distributor"], + "block": [ + { + "directive": "set", + "args": ["$distributor", addresses["distributor"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$distributor:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/api/v1/push"], + "block": [ + { + "directive": "set", + "args": ["$distributor", addresses["distributor"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$distributor:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/otlp/v1/metrics"], + "block": [ + { + "directive": "set", + "args": ["$distributor", addresses["distributor"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$distributor:8080$request_uri"], + }, + ], + }, + # Alertmanager endpoints + { + "directive": "location", + "args": ["/alertmanager"], + "block": [ + { + "directive": "set", + "args": ["$alertmanager", addresses["alertmanager"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$alertmanager:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/multitenant_alertmanager/status"], + "block": [ + { + "directive": "set", + "args": ["$alertmanager", addresses["alertmanager"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$alertmanager:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/api/v1/alerts"], + "block": [ + { + "directive": "set", + "args": ["$alertmanager", addresses["alertmanager"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$alertmanager:8080$request_uri"], + }, + ], + }, + # Ruler endpoints + { + "directive": "location", + "args": ["/prometheus/config/v1/rules"], + "block": [ + {"directive": "set", "args": ["$ruler", addresses["rules"]]}, + { + "directive": "proxy_pass", + "args": ["http://$ruler:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/prometheus/api/v1/rules"], + "block": [ + {"directive": "set", "args": ["$ruler", addresses["ruler"]]}, + { + "directive": "proxy_pass", + "args": ["http://$ruler:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["/prometheus/api/v1/alerts"], + "block": [ + {"directive": "set", "args": ["$ruler", addresses["ruler"]]}, + { + "directive": "proxy_pass", + "args": ["http://$ruler:8080$request_uri"], + }, + ], + }, + { + "directive": "location", + "args": ["=", "/ruler/ring"], + "block": [ + {"directive": "set", "args": ["$ruler", addresses["ruler"]]}, + { + "directive": "proxy_pass", + "args": ["http://$ruler:8080$request_uri"], + }, + ], + }, + # Query frontent + { + "directive": "location", + "args": ["/prometheus"], + "block": [ + { + "directive": "set", + "args": ["$query_frontend", addresses["query_frontend"]], + 
}, + { + "directive": "proxy_pass", + "args": ["http://$query_frontend:8080$request_uri"], + }, + ], + }, + # Buildinfo endpoint can go to any component + { + "directive": "location", + "args": ["=", "/api/v1/status/buildinfo"], + "block": [ + { + "directive": "set", + "args": ["$query_frontend", addresses["query_frontend"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$query_frontend:8080$request_uri"], + }, + ], + }, + # Compactor endpoint for uploading blocks + { + "directive": "location", + "args": ["=", "/api/v1/upload/block/"], + "block": [ + { + "directive": "set", + "args": ["$compactor", addresses["compactor"]], + }, + { + "directive": "proxy_pass", + "args": ["http://$compactor:8080$request_uri"], + }, + ], + }, + ], + }, + ], + }, + ] + + return crossplane.build(full_config) + + @property + def layer(self) -> Layer: + """Return the Pebble layer for Nginx.""" + return Layer( + { + "summary": "nginx layer", + "description": "pebble config layer for Nginx", + "services": { + "nginx": { + "override": "replace", + "summary": "nginx", + "command": "nginx", + "startup": "enabled", + } + }, + } + ) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index a8d2d22..d8e8819 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -6,7 +6,7 @@ import unittest from charm import MimirCoordinatorK8SOperatorCharm -from ops.model import ActiveStatus, BlockedStatus +from ops.model import BlockedStatus from ops.testing import Harness From 7cd2661f5b4d4b974c84061ff3d04d01bb7a2357 Mon Sep 17 00:00:00 2001 From: Luca Bello Date: Fri, 1 Dec 2023 11:24:25 +0100 Subject: [PATCH 2/4] fix typo --- src/nginx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nginx.py b/src/nginx.py index 6fd2041..27e92bd 100644 --- a/src/nginx.py +++ b/src/nginx.py @@ -258,7 +258,7 @@ def basic_auth(enabled): }, ], }, - # Query frontent + # Query frontend { "directive": "location", "args": ["/prometheus"], From 11fda9673ca8646a79b88bfd4223cf13f7c2903c Mon Sep 17 00:00:00 2001 From: Luca Bello Date: Wed, 6 Dec 2023 15:16:50 +0100 Subject: [PATCH 3/4] fix nginx config and add load balancing --- .../mimir_coordinator_k8s/v0/mimir_cluster.py | 24 +++- src/charm.py | 2 +- src/nginx.py | 107 +++++++----------- 3 files changed, 61 insertions(+), 72 deletions(-) diff --git a/lib/charms/mimir_coordinator_k8s/v0/mimir_cluster.py b/lib/charms/mimir_coordinator_k8s/v0/mimir_cluster.py index 3a8ebfe..934ccca 100644 --- a/lib/charms/mimir_coordinator_k8s/v0/mimir_cluster.py +++ b/lib/charms/mimir_coordinator_k8s/v0/mimir_cluster.py @@ -8,6 +8,7 @@ """ import json import logging +from collections import defaultdict from enum import Enum from typing import Any, Dict, MutableMapping, Set, List, Iterable from typing import Optional @@ -23,7 +24,7 @@ DEFAULT_ENDPOINT_NAME = "mimir-cluster" LIBAPI = 0 -LIBPATCH = 1 +LIBPATCH = 2 BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"} @@ -242,15 +243,18 @@ def gather_roles(self) -> Dict[MimirRole, int]: data[role] += role_n return data - def gather_addresses(self) -> Set[str]: - """Go through the worker's unit databags to collect all the addresses published by the units.""" - data = set() + def gather_addresses_by_role(self) -> Dict[str, Set[str]]: + """Go through the worker's unit databags to collect all the addresses published by the units, by role.""" + data = defaultdict(set) for relation in self._relations: + worker_app_data = MimirClusterRequirerAppData.load(relation.data[relation.app]) + worker_roles = 
set(worker_app_data.roles) for worker_unit in relation.units: try: worker_data = MimirClusterRequirerUnitData.load(relation.data[worker_unit]) unit_address = worker_data.address - data.add(unit_address) + for role in worker_roles: + data[role].add(unit_address) except DataValidationError as e: log.error(f"invalid databag contents: {e}") continue @@ -258,6 +262,16 @@ def gather_addresses(self) -> Set[str]: return data + def gather_addresses(self) -> Set[str]: + """Go through the worker's unit databags to collect all the addresses published by the units.""" + data = set() + addresses_by_role = self.gather_addresses_by_role() + for role, address_set in addresses_by_role.items(): + data.update(address_set) + + return data + + class MimirClusterRemovedEvent(ops.EventBase): """Event emitted when the relation with the "mimir-cluster" provider has been severed. diff --git a/src/charm.py b/src/charm.py index 70adf2b..0f0ec92 100755 --- a/src/charm.py +++ b/src/charm.py @@ -47,7 +47,7 @@ def __init__(self, *args): self.cluster_provider = MimirClusterProvider(self) self.coordinator = MimirCoordinator(cluster_provider=self.cluster_provider) - self.nginx = Nginx() + self.nginx = Nginx(cluster_provider=self.cluster_provider) self.framework.observe( self.on.nginx_pebble_ready, # pyright: ignore self._on_nginx_pebble_ready, diff --git a/src/nginx.py b/src/nginx.py index 27e92bd..f9211e5 100644 --- a/src/nginx.py +++ b/src/nginx.py @@ -3,8 +3,10 @@ """Nginx workload.""" import logging +from typing import Dict, List, Set import crossplane +from charms.mimir_coordinator_k8s.v0.mimir_cluster import MimirClusterProvider from ops.pebble import Layer logger = logging.getLogger(__name__) @@ -15,22 +17,32 @@ class Nginx: config_path = "/etc/nginx/nginx.conf" - def __init__(self, *args): + def __init__(self, cluster_provider: MimirClusterProvider, *args): super().__init__(*args) + self.cluster_provider = cluster_provider @property def config(self) -> str: """Build and return the Nginx configuration.""" log_level = "error" auth_enabled = False - addresses = { - "FIXME": "unit.app-endpoints.model.svc.cluster.local", - "distributor": "worker.worker-endpoints.cos.svc.cluster.local", - "alertmanager": "worker.worker-endpoints.cos.svc.cluster.local", - "ruler": "worker.worker-endpoints.cos.svc.cluster.local", - "query_frontend": "worker.worker-endpoints.cos.svc.cluster.local", - "compactor": "worker.worker-endpoints.cos.svc.cluster.local", - } # FIXME example, get it from somewhere + addresses_by_role = self.cluster_provider.gather_addresses_by_role() + + def upstreams(addresses_by_role: Dict[str, Set[str]]) -> List[Dict]: + nginx_upstreams = [] + for role, address_set in addresses_by_role.items(): + nginx_upstreams.append( + { + "directive": "upstream", + "args": [role], + "block": [ + {"directive": "server", "args": [f"{addr}:8080"]} + for addr in address_set + ], + } + ) + + return nginx_upstreams def log_verbose(verbose): if verbose: @@ -49,8 +61,8 @@ def log_verbose(verbose): def resolver(custom_resolver): if custom_resolver: - return {"directive": "resolver", "args": [custom_resolver]} - return {} # return the CoreDNS cluster local address + return [{"directive": "resolver", "args": [custom_resolver]}] + return [{"directive": "resolver", "args": ["kube-dns.kube-system.svc.cluster.local."]}] def basic_auth(enabled): if enabled: @@ -78,6 +90,8 @@ def basic_auth(enabled): "directive": "http", "args": [], "block": [ + # upstreams (load balancing) + *upstreams(addresses_by_role), # temp paths {"directive": 
"client_body_temp_path", "args": ["/tmp/client_temp"]}, {"directive": "proxy_temp_path", "args": ["/tmp/proxy_temp_path"]}, @@ -89,14 +103,15 @@ def basic_auth(enabled): { "directive": "log_format", "args": [ - 'main \'$remote_addr - $remote_user [$time_local] $status "$request" $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for";' + "main", + '$remote_addr - $remote_user [$time_local] $status "$request" $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"', ], }, *log_verbose(verbose=False), # mimir-related {"directive": "sendfile", "args": ["on"]}, {"directive": "tcp_nopush", "args": ["on"]}, - resolver(custom_resolver=None), # empty for now, check if it's necessary + *resolver(custom_resolver=None), # TODO: add custom http block for the user to config? { "directive": "map", @@ -132,13 +147,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/distributor"], "block": [ - { - "directive": "set", - "args": ["$distributor", addresses["distributor"]], - }, { "directive": "proxy_pass", - "args": ["http://$distributor:8080$request_uri"], + "args": ["http://distributor"], }, ], }, @@ -146,13 +157,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/api/v1/push"], "block": [ - { - "directive": "set", - "args": ["$distributor", addresses["distributor"]], - }, { "directive": "proxy_pass", - "args": ["http://$distributor:8080$request_uri"], + "args": ["http://distributor"], }, ], }, @@ -160,13 +167,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/otlp/v1/metrics"], "block": [ - { - "directive": "set", - "args": ["$distributor", addresses["distributor"]], - }, { "directive": "proxy_pass", - "args": ["http://$distributor:8080$request_uri"], + "args": ["http://distributor"], }, ], }, @@ -175,13 +178,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/alertmanager"], "block": [ - { - "directive": "set", - "args": ["$alertmanager", addresses["alertmanager"]], - }, { "directive": "proxy_pass", - "args": ["http://$alertmanager:8080$request_uri"], + "args": ["http://alertmanager"], }, ], }, @@ -189,13 +188,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/multitenant_alertmanager/status"], "block": [ - { - "directive": "set", - "args": ["$alertmanager", addresses["alertmanager"]], - }, { "directive": "proxy_pass", - "args": ["http://$alertmanager:8080$request_uri"], + "args": ["http://alertmanager"], }, ], }, @@ -203,13 +198,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/api/v1/alerts"], "block": [ - { - "directive": "set", - "args": ["$alertmanager", addresses["alertmanager"]], - }, { "directive": "proxy_pass", - "args": ["http://$alertmanager:8080$request_uri"], + "args": ["http://alertmanager"], }, ], }, @@ -218,10 +209,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/prometheus/config/v1/rules"], "block": [ - {"directive": "set", "args": ["$ruler", addresses["rules"]]}, { "directive": "proxy_pass", - "args": ["http://$ruler:8080$request_uri"], + "args": ["http://ruler"], }, ], }, @@ -229,10 +219,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/prometheus/api/v1/rules"], "block": [ - {"directive": "set", "args": ["$ruler", addresses["ruler"]]}, { "directive": "proxy_pass", - "args": ["http://$ruler:8080$request_uri"], + "args": ["http://ruler"], }, ], }, @@ -240,10 +229,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/prometheus/api/v1/alerts"], "block": [ - {"directive": "set", "args": ["$ruler", 
addresses["ruler"]]}, { "directive": "proxy_pass", - "args": ["http://$ruler:8080$request_uri"], + "args": ["http://ruler"], }, ], }, @@ -251,10 +239,9 @@ def basic_auth(enabled): "directive": "location", "args": ["=", "/ruler/ring"], "block": [ - {"directive": "set", "args": ["$ruler", addresses["ruler"]]}, { "directive": "proxy_pass", - "args": ["http://$ruler:8080$request_uri"], + "args": ["http://ruler"], }, ], }, @@ -263,13 +250,9 @@ def basic_auth(enabled): "directive": "location", "args": ["/prometheus"], "block": [ - { - "directive": "set", - "args": ["$query_frontend", addresses["query_frontend"]], - }, { "directive": "proxy_pass", - "args": ["http://$query_frontend:8080$request_uri"], + "args": ["http://query-frontend"], }, ], }, @@ -278,13 +261,9 @@ def basic_auth(enabled): "directive": "location", "args": ["=", "/api/v1/status/buildinfo"], "block": [ - { - "directive": "set", - "args": ["$query_frontend", addresses["query_frontend"]], - }, { "directive": "proxy_pass", - "args": ["http://$query_frontend:8080$request_uri"], + "args": ["http://query-frontend"], }, ], }, @@ -293,13 +272,9 @@ def basic_auth(enabled): "directive": "location", "args": ["=", "/api/v1/upload/block/"], "block": [ - { - "directive": "set", - "args": ["$compactor", addresses["compactor"]], - }, { "directive": "proxy_pass", - "args": ["http://$compactor:8080$request_uri"], + "args": ["http://compactor"], }, ], }, From 94599a566de6fa997f7e099a482b230fc918ad70 Mon Sep 17 00:00:00 2001 From: Luca Bello Date: Thu, 7 Dec 2023 09:53:29 +0100 Subject: [PATCH 4/4] remove unused variable --- src/charm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index 0f0ec92..ee5fdb3 100755 --- a/src/charm.py +++ b/src/charm.py @@ -35,7 +35,6 @@ def __init__(self, *args): super().__init__(*args) self._nginx_container = self.unit.get_container("nginx") - self._nginx_config_path = "/etc/nginx/nginx.conf" self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.collect_unit_status, self._on_collect_status)