diff --git a/helm-configs/monitoring/openstack-exporter/openstack-exporter-helm-overrides.yaml b/helm-configs/monitoring/openstack-exporter/openstack-exporter-helm-overrides.yaml
new file mode 100644
index 00000000..3ed2e502
--- /dev/null
+++ b/helm-configs/monitoring/openstack-exporter/openstack-exporter-helm-overrides.yaml
@@ -0,0 +1,230 @@
+
+images:
+  tags:
+    prometheus_openstack_exporter: docker.io/openstackhelm/prometheus-openstack-exporter:ubuntu_bionic-20240210
+    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+    image_repo_sync: docker.io/library/docker:17.07.0
+    ks_user: docker.io/openstackhelm/heat:2023.2-ubuntu_jammy
+  pull_policy: IfNotPresent
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  openstack_exporter:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+
+pod:
+  security_context:
+    exporter:
+      pod:
+        runAsUser: 65534
+      container:
+        openstack_metrics_exporter:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+    ks_user:
+      pod:
+        runAsUser: 65534
+      container:
+        prometheus_openstack_exporter_ks_user:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  mounts:
+    prometheus_openstack_exporter:
+      prometheus_openstack_exporter:
+      init_container: null
+  replicas:
+    prometheus_openstack_exporter: 1
+  lifecycle:
+    upgrades:
+      deployments:
+        revision_history: 3
+        pod_replacement_strategy: RollingUpdate
+        rolling_update:
+          max_unavailable: 1
+          max_surge: 3
+    termination_grace_period:
+      prometheus_openstack_exporter:
+        timeout: 30
+  resources:
+    enabled: false
+    prometheus_openstack_exporter:
+      requests:
+        memory: "64Mi"
+        cpu: "100m"
+      limits:
+        memory: "4096Mi"
+    jobs:
+      image_repo_sync:
+        requests:
+          memory: "64Mi"
+          cpu: "100m"
+        limits:
+          memory: "4096Mi"
+      ks_user:
+        requests:
+          memory: "64Mi"
+          cpu: "100m"
+        limits:
+          memory: "4096Mi"
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - prometheus-openstack-exporter-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+    ks_user:
+      services:
+        - endpoint: internal
+          service: identity
+    prometheus_openstack_exporter:
+      jobs:
+        - prometheus-openstack-exporter-ks-user
+      services:
+        - endpoint: internal
+          service: identity
+
+conf:
+  prometheus_openstack_exporter:
+    OS_POLLING_INTERVAL: 30
+    TIMEOUT_SECONDS: 20
+    OS_RETRIES: 1
+
+secrets:
+  identity:
+    admin: prometheus-openstack-exporter-keystone-admin
+    user: prometheus-openstack-exporter-keystone-user
+  oci_image_registry:
+    prometheus-openstack-exporter: prometheus-openstack-exporter-oci-image-registry-key
+  tls:
+    identity:
+      api:
+        # This name should be same as in keystone. Keystone
+        # secret will be used in these charts
+        #
+        public: keystone-tls-public
+        internal: keystone-tls-public
+
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  oci_image_registry:
+    name: oci-image-registry
+    namespace: oci-image-registry
+    auth:
+      enabled: false
+      prometheus-openstack-exporter:
+        username: prometheus-openstack-exporter
+        password: password
+    hosts:
+      default: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        default: null
+  prometheus_openstack_exporter:
+    namespace: null
+    hosts:
+      default: openstack-metrics
+    host_fqdn_override:
+      default: null
+    path:
+      default: null
+    scheme:
+      default: 'http'
+    port:
+      exporter:
+        default: 9103
+  identity:
+    name: keystone
+    auth:
+      admin:
+        region_name: RegionOne
+        username: admin
+        password: password
+        project_name: admin
+        user_domain_name: default
+        project_domain_name: default
+      user:
+        role: admin
+        region_name: RegionOne
+        username: prometheus-openstack-exporter
+        password: password
+        project_name: service
+        user_domain_name: default
+        project_domain_name: default
+    hosts:
+      default: keystone
+      internal: keystone-api
+    host_fqdn_override:
+      default: null
+    path:
+      default: /v3
+    scheme:
+      default: 'http'
+    port:
+      api:
+        default: 80
+        internal: 5000
+
+monitoring:
+  prometheus:
+    enabled: true
+    openstack_exporter:
+      scrape: true
+
+network:
+  openstack_metrics_exporter:
+    port: 9103
+
+network_policy:
+  prometheus-openstack-exporter:
+    ingress:
+      - {}
+    egress:
+      - {}
+
+manifests:
+  certificates: false
+  configmap_bin: true
+  deployment: true
+  job_image_repo_sync: true
+  job_ks_user: true
+  network_policy: false
+  secret_keystone: true
+  secret_registry: true
+  service: true
\ No newline at end of file
diff --git a/kustomize/prometheus/kustomization.yaml b/kustomize/prometheus/kustomization.yaml
index af727b76..4313fd7d 100644
--- a/kustomize/prometheus/kustomization.yaml
+++ b/kustomize/prometheus/kustomization.yaml
@@ -1,3 +1,6 @@
+resources:
+  - ns-prometheus.yaml
+
 helmCharts:
   - name: prometheus
     repo: https://prometheus-community.github.io/helm-charts
diff --git a/kustomize/prometheus/ns-prometheus.yaml b/kustomize/prometheus/ns-prometheus.yaml
new file mode 100644
index 00000000..968d863f
--- /dev/null
+++ b/kustomize/prometheus/ns-prometheus.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    kubernetes.io/metadata.name: prometheus
+    name: prometheus
+  name: prometheus
diff --git a/kustomize/prometheus/values.yaml b/kustomize/prometheus/values.yaml
index 9aa88899..aef175f5 100644
--- a/kustomize/prometheus/values.yaml
+++ b/kustomize/prometheus/values.yaml
@@ -1154,6 +1154,67 @@ serverFiles:
             action: replace
             target_label: node
 
+      # Scrape config for service endpoints.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'openstack-exporter'
+        kubernetes_sd_configs:
+          - role: endpoints
+        scrape_interval: 60s
+        relabel_configs:
+          - source_labels:
+              - __meta_kubernetes_service_name
+            action: keep
+            regex: "openstack-metrics"
+          - source_labels:
+              - __meta_kubernetes_service_annotation_prometheus_io_scrape
+            action: keep
+            regex: true
+          - source_labels:
+              - __meta_kubernetes_service_annotation_prometheus_io_scheme
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels:
+              - __meta_kubernetes_service_annotation_prometheus_io_path
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels:
+              - __address__
+              - __meta_kubernetes_service_annotation_prometheus_io_port
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels:
+              - __meta_kubernetes_namespace
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels:
+              - __meta_kubernetes_service_name
+            action: replace
+            target_label: instance
+          - source_labels:
+              - __meta_kubernetes_service_name
+            action: replace
+            target_label: kubernetes_name
+          - source_labels:
+              - __meta_kubernetes_service_name
+            target_label: job
+            replacement: ${1}
+
+
 # adds additional scrape configs to prometheus.yml
 # must be a string so you have to add a | after extraScrapeConfigs:
 # example adds prometheus-blackbox-exporter scrape config
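For context on the new `openstack-exporter` scrape job: it only keeps endpoints whose backing Service is named `openstack-metrics` and carries a `prometheus.io/scrape: "true"` annotation, and it rewrites the scrape address to the port given in `prometheus.io/port`. With the overrides in this change (`endpoints.prometheus_openstack_exporter.hosts.default: openstack-metrics`, `network.openstack_metrics_exporter.port: 9103`, `monitoring.prometheus.openstack_exporter.scrape: true`), the exporter chart is expected to render a Service roughly like the sketch below; the namespace, labels, and selector shown here are assumptions for illustration, not output copied from the chart templates.

# Illustrative sketch only: approximate shape of the Service the exporter chart
# should render with the overrides above. Namespace, labels, and selector values
# are assumptions for this example, not rendered chart output.
apiVersion: v1
kind: Service
metadata:
  name: openstack-metrics           # matched by the first `keep` rule on __meta_kubernetes_service_name
  namespace: openstack              # assumption: the namespace the exporter chart is deployed into
  annotations:
    prometheus.io/scrape: "true"    # required by the second `keep` rule
    prometheus.io/port: "9103"      # relabeling rewrites __address__ to <endpoint-ip>:9103
spec:
  selector:
    application: prometheus-openstack-exporter   # assumed selector label
  ports:
    - name: metrics
      port: 9103
      targetPort: 9103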