From ef45c032b24708f2586d73503133d241133cbf26 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 19 Aug 2024 12:06:40 -0700 Subject: [PATCH 001/193] Nublado 7 / TMPDIR on /tmp (==tmpfs) --- applications/nublado/Chart.yaml | 2 +- applications/nublado/values-idfdemo.yaml | 1 + applications/nublado/values-idfdev.yaml | 1 + applications/nublado/values-idfint.yaml | 1 + applications/nublado/values-idfprod.yaml | 1 + 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 554c445ffd..1b0e3dad86 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 6.3.0 +appVersion: 7.0.0 dependencies: - name: jupyterhub diff --git a/applications/nublado/values-idfdemo.yaml b/applications/nublado/values-idfdemo.yaml index c36c3b5df1..49ea663521 100644 --- a/applications/nublado/values-idfdemo.yaml +++ b/applications/nublado/values-idfdemo.yaml @@ -23,6 +23,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://demo.lsst.cloud/api/butler/configs/idf-repositories.yaml" GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 6432ec3fa2..94760a4c94 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -23,6 +23,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://data-dev.lsst.cloud/api/butler/configs/idf-repositories.yaml" GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 0ce19538b5..74deab6857 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -33,6 +33,7 @@ controller: PANDAMON_URL: "https://usdf-panda-bigmon.slac.stanford.edu:8443/" PANDA_CONFIG_ROOT: "~" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" # 5 days + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 2f64319a2f..85d9196fef 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -18,6 +18,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://data.lsst.cloud/api/butler/configs/idf-repositories.yaml" S3_ENDPOINT_URL: "https://storage.googleapis.com" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" # 5 days + TMPDIR: "/tmp" initContainers: - name: "inithome" image: From 218a12f1f9a12e72504d9df22ebcff6acb1ddefb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 19:12:04 +0000 Subject: [PATCH 002/193] chore(deps): update helm release strimzi-kafka-operator to v0.43.0 --- applications/strimzi/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index 342761cc8b..fc8ddc5460 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -7,5 +7,5 @@ home: 
https://strimzi.io appVersion: "0.39.0" dependencies: - name: strimzi-kafka-operator - version: "0.42.0" + version: "0.43.0" repository: https://strimzi.io/charts/ From 0350baa551eedad75aa6aac2b9556f2eeb30bcbf Mon Sep 17 00:00:00 2001 From: Dan Fuchs Date: Mon, 26 Aug 2024 13:56:50 -0500 Subject: [PATCH 003/193] DM-44635: `appmetrics` Kafka/Sasquatch user MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Password is in 1pass: RSP data-dev.lsst.cloud/sasquatch/appmetrics-password Test: ``` ❯ echo "blah" | kcat -b sasquatch-dev-kafka-bootstrap.lsst.cloud:9094 -X security.protocol=SASL_SSL -X sasl.mechanism=SCRAM-SHA-512 -X sasl.username=appmetrics -X sasl.password=$KAFKA_PASSWORD -P -t lsst.square.metrics.dfuchs-test ❯ kcat -b sasquatch-dev-kafka-bootstrap.lsst.cloud:9094 -X security.protocol=SASL_SSL -X sasl.mechanism=SCRAM-SHA-512 -X sasl.username=appmetrics -X sasl.password=$KAFKA_PASSWORD -P -t lsst.square.metrics.dfuchs-test -C -o 0blah ``` --- .../charts/strimzi-kafka/templates/users.yaml | 38 +++++++++++++++++++ applications/sasquatch/secrets.yaml | 4 ++ applications/sasquatch/values-idfdev.yaml | 8 ++++ 3 files changed, 50 insertions(+) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml index 5b30f2a6a3..75b9433255 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml @@ -300,3 +300,41 @@ spec: host: "*" operation: All {{- end }} +{{- if .Values.users.appmetrics.enabled }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: appmetrics + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + authentication: + type: scram-sha-512 + password: + valueFrom: + secretKeyRef: + name: sasquatch + key: appmetrics-password + authorization: + type: simple + acls: + - resource: + type: group + name: "*" + patternType: literal + operation: All + - resource: + type: topic + name: "lsst.square.metrics" + patternType: prefix + type: allow + host: "*" + operation: All + - resource: + type: cluster + operations: + - Describe + - DescribeConfigs + # TODO: Any quotas needed? +{{- end }} diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 2a19674f17..8634cb3df1 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -69,6 +69,10 @@ ts-salkafka-password: description: >- ts-salkafka KafkaUser password. if: strimzi-kafka.users.tsSalKafka.enabled +appmetrics-password: + description: >- + appmetrics KafkaUser password. + if: strimzi-kafka.users.appmetrics.enabled connect-push-secret: description: >- Write token for pushing generated Strimzi Kafka Connect image to GitHub Container Registry. 
diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 6519b85afb..4db585f4d5 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -32,6 +32,8 @@ strimzi-kafka: enabled: true kafkaConnectManager: enabled: true + appmetrics: + enabled: true kraft: enabled: true kafkaController: @@ -73,6 +75,12 @@ telegraf-kafka-consumer: replicaCount: 1 topicRegexps: | [ "lsst.Test.*" ] + appmetrics: + enabled: true + database: "metrics" + replicaCount: 1 + topicRegexps: | + [ "lsst.square.metrics.*" ] kafdrop: cmdArgs: "--message.format=AVRO --topic.deleteEnabled=true --topic.createEnabled=true" From f991ca276298e1ad2fb7210fd35f9cd657f7b710 Mon Sep 17 00:00:00 2001 From: Stelios Voutsinas Date: Mon, 26 Aug 2024 12:05:26 -0700 Subject: [PATCH 004/193] Enable cadcBaseUuid in gafaelfawr for roe environment --- applications/gafaelfawr/values-roe.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/gafaelfawr/values-roe.yaml b/applications/gafaelfawr/values-roe.yaml index f53b9e0ead..f3914a1d96 100644 --- a/applications/gafaelfawr/values-roe.yaml +++ b/applications/gafaelfawr/values-roe.yaml @@ -8,6 +8,9 @@ config: github: clientId: "10172b4db1b67ee31620" + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "4cb5f948-aad9-466c-837b-5eae565b0a77" + # Allow access by GitHub team. groupMapping: "exec:admin": From c4cb5f4587650f6ed0ad1dca4071b44f9f7f00a8 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 27 Aug 2024 11:56:38 -0700 Subject: [PATCH 005/193] Add default flag for appmetrics kafka user --- applications/sasquatch/README.md | 1 + applications/sasquatch/charts/strimzi-kafka/README.md | 1 + applications/sasquatch/charts/strimzi-kafka/values.yaml | 5 +++++ 3 files changed, 7 insertions(+) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index dc8b8488d3..b8a250eace 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -387,6 +387,7 @@ Rubin Observatory's telemetry service | strimzi-kafka.registry.resources | object | See `values.yaml` | Kubernetes requests and limits for the Schema Registry | | strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| strimzi-kafka.users.appmetrics.enabled | bool | `false` | Enable user appmetrics | | strimzi-kafka.users.camera.enabled | bool | `false` | Enable user camera, used at the camera environments | | strimzi-kafka.users.consdb.enabled | bool | `false` | Enable user consdb | | strimzi-kafka.users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index ce4efaea25..4e844c02a3 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -65,6 +65,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| registry.resources | object | See `values.yaml` | Kubernetes requests and limits for the Schema Registry | | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| users.appmetrics.enabled | bool | `false` | Enable user appmetrics | | users.camera.enabled | bool | `false` | Enable user camera, used at the camera environments | | users.consdb.enabled | bool | `false` | Enable user consdb | | users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 2ae8501f6f..f43fd60e4c 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -285,6 +285,11 @@ users: # -- Enable user consdb enabled: false + appmetrics: + # -- Enable user appmetrics + enabled: false + + mirrormaker2: # -- Enable replication in the target (passive) cluster enabled: false From 136d663d256e04907e9d8b53b2a668ec8cc4ef1c Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 27 Aug 2024 11:06:18 -0700 Subject: [PATCH 006/193] Upgrade Kafka to version 3.8.0 --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index b8a250eace..3e53b27e34 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -363,7 +363,7 @@ Rubin Observatory's telemetry service | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| strimzi-kafka.kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| strimzi-kafka.kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | strimzi-kafka.kafkaController.enabled | bool | `false` | Enable Kafka Controller | | strimzi-kafka.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | strimzi-kafka.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 4e844c02a3..fd425d5279 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -41,7 +41,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | kafkaController.enabled | bool | `false` | Enable Kafka Controller | | kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index f43fd60e4c..fa0deaa57b 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -11,7 +11,7 @@ cluster: kafka: # -- Version of Kafka to deploy - version: "3.7.1" + version: "3.8.0" # -- Number of Kafka broker replicas to run replicas: 3 From d86df1c94ef7a1c2d03fac663b86d4a85b030d07 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 27 Aug 2024 14:07:13 -0700 Subject: [PATCH 007/193] Increase Butler memory limit Butler server's memory limit was set too low previously -- it could easily result in an out-of-memory kill with just a couple in-flight requests. --- applications/butler/values.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index c59779c6a5..81264a34cf 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -41,10 +41,15 @@ podAnnotations: {} resources: limits: cpu: "1" - memory: "324Mi" + # Worst case peak usage for a single container would be something like all + # 40 threads in the thread pool running large queries costing ~35MB each. + memory: "1.5Gi" requests: cpu: "15m" - memory: "150Mi" + # Butler server uses around 200MB idle at startup, but under dynamic usage + # Python seems to want to hold onto another couple hundred megabytes of + # heap. + memory: "0.5Gi" # -- Node selection rules for the butler deployment pod nodeSelector: {} From 63a694db2e23b0f3be27dfac25480fae52c2ea5e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 29 Aug 2024 16:19:57 -0700 Subject: [PATCH 008/193] BTS: Change Kafka broker message timestamp type. 
--- applications/sasquatch/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 257afa096b..0eb4939051 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -22,6 +22,7 @@ strimzi-kafka: config: auto.create.topics.enable: false log.cleaner.min.compaction.lag.ms: 259200000 + log.message.timestamp.type: LogAppendTime log.retention.hours: 72 log.retention.ms: 259200000 storage: From 5a13c5100f4391e6c26b4ea634fd03052799f570 Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Fri, 23 Aug 2024 09:09:33 -0700 Subject: [PATCH 009/193] cm-service: remove redis --- applications/cm-service/Chart.yaml | 5 ----- applications/cm-service/README.md | 2 -- applications/cm-service/secrets.yaml | 14 +++----------- applications/cm-service/templates/deployment.yaml | 7 ------- .../cm-service/templates/vault-secrets.yaml | 13 ------------- .../cm-service/templates/worker-deployment.yaml | 8 -------- applications/cm-service/values.yaml | 9 --------- 7 files changed, 3 insertions(+), 55 deletions(-) diff --git a/applications/cm-service/Chart.yaml b/applications/cm-service/Chart.yaml index ad1b2ef5a1..ede07c7126 100644 --- a/applications/cm-service/Chart.yaml +++ b/applications/cm-service/Chart.yaml @@ -6,8 +6,3 @@ sources: - https://github.com/lsst-dm/cm-service type: application version: 1.0.0 - -dependencies: -- name: redis - version: 1.0.13 - repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/cm-service/README.md b/applications/cm-service/README.md index 88b43969e7..9f720ea79d 100644 --- a/applications/cm-service/README.md +++ b/applications/cm-service/README.md @@ -30,8 +30,6 @@ Campaign Management for Rubin Data Release Production | image.repository | string | `"ghcr.io/lsst-dm/cm-service"` | Image to use for frontend containers | | image.tag | string | The appVersion of the chart | Tag of frontend image to use | | ingress.annotations | object | `{}` | Additional annotations for the frontend ingress rule | -| redis.config.secretKey | string | `"password"` | Key inside secret from which to get the Redis password (do not change) | -| redis.config.secretName | string | `"redis-secret"` | Name of secret containing Redis password | | worker.affinity | object | `{}` | Affinity rules for the worker pods | | worker.htcondor.config.contents | string | `nil` | If specified, contents of htcondor config file to be injected into worker containers | | worker.htcondor.config.mountPath | string | `nil` | If specified, location for htcondor config file to be injected into worker containers | diff --git a/applications/cm-service/secrets.yaml b/applications/cm-service/secrets.yaml index 414e59c34f..681ae4c8a3 100644 --- a/applications/cm-service/secrets.yaml +++ b/applications/cm-service/secrets.yaml @@ -1,16 +1,8 @@ -redis-password: - description: >- - Password used to authenticate cm-service to its internal Redis server, - deployed as part of the same Argo CD application. This secret can be - changed at any time, but both the Redis server and the cm-service - deployments will then have to be restarted to pick up the new value. - generate: - type: password postgres-password: description: >- Password used to authenticate cm-service to its internal cnpg Postgres - server, deployed as part of the same Argo CD application. 
This secret can - be changed at any time, but both the Redis server and the cm-service - deployments will then have to be restarted to pick up the new value. + server, deployed as part of the same Argo CD application. This secret can be + changed at any time, but the cm-service deployments will then have to be + restarted to pick up the new value. generate: type: password diff --git a/applications/cm-service/templates/deployment.yaml b/applications/cm-service/templates/deployment.yaml index b50e8277eb..bfe3b38d8a 100644 --- a/applications/cm-service/templates/deployment.yaml +++ b/applications/cm-service/templates/deployment.yaml @@ -28,11 +28,6 @@ spec: containers: - name: "cm-service" env: - - name: CM_ARQ_REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secret - key: password - name: CM_DATABASE_PASSWORD valueFrom: secretKeyRef: @@ -48,8 +43,6 @@ spec: value: {{ .Values.config.logProfile | quote }} - name: CM_LOG_LEVEL value: {{ .Values.config.logLevel | quote }} - - name: CM_ARQ_REDIS_URL - value: "redis://cm-service-redis/1" image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: diff --git a/applications/cm-service/templates/vault-secrets.yaml b/applications/cm-service/templates/vault-secrets.yaml index 26f72b46e6..996a6617d8 100644 --- a/applications/cm-service/templates/vault-secrets.yaml +++ b/applications/cm-service/templates/vault-secrets.yaml @@ -1,18 +1,5 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret -metadata: - name: redis-secret - labels: - {{- include "cm-service.labels" . | nindent 4 }} -spec: - path: "{{ .Values.global.vaultSecretsPath }}/cm-service" - templates: - password: >- - {% index .Secrets "redis-password" %} - type: Opaque ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret metadata: name: postgres-secret labels: diff --git a/applications/cm-service/templates/worker-deployment.yaml b/applications/cm-service/templates/worker-deployment.yaml index e0bce6f8c1..3218cd651d 100644 --- a/applications/cm-service/templates/worker-deployment.yaml +++ b/applications/cm-service/templates/worker-deployment.yaml @@ -27,14 +27,6 @@ spec: automountServiceAccountToken: false containers: - name: "cm-service-worker" - env: - - name: CM_ARQ_REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secret - key: password - - name: CM_ARQ_REDIS_URL - value: "redis://cm-service-redis/1" image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} resources: diff --git a/applications/cm-service/values.yaml b/applications/cm-service/values.yaml index 363b8a4e25..ae260853f4 100644 --- a/applications/cm-service/values.yaml +++ b/applications/cm-service/values.yaml @@ -129,15 +129,6 @@ worker: # -- If specified, location for htcondor schedd address file to be injected into worker pods contents: null -redis: - config: - # -- Name of secret containing Redis password - secretName: "redis-secret" - - # -- Key inside secret from which to get the Redis password (do not - # change) - secretKey: "password" - # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. global: From c46d2021dabd92e7979ce20f1b8873558edba712 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 30 Aug 2024 08:28:24 -0700 Subject: [PATCH 010/193] BTS: Increase M1M3 LOVE producer memory resources. 
--- applications/love/values-base.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/love/values-base.yaml b/applications/love/values-base.yaml index 3742727392..fecc6d4326 100644 --- a/applications/love/values-base.yaml +++ b/applications/love/values-base.yaml @@ -296,6 +296,13 @@ love-producer: csc: MTDomeTrajectory:0 --log-level 10 - name: mtm1m3 csc: MTM1M3:0 --log-level 10 + resources: + requests: + cpu: 10m + memory: 200Mi + limits: + cpu: 100m + memory: 600Mi - name: mtm2 csc: MTM2:0 --log-level 10 - name: mtmount From 89b5250286f5fb3a82b7b3f7ba249dd9428615fa Mon Sep 17 00:00:00 2001 From: A I Date: Fri, 30 Aug 2024 17:15:31 +0100 Subject: [PATCH 011/193] enabled ssotap --- environments/values-roe.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index 8759014a8d..444f3bd295 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -11,3 +11,4 @@ applications: postgres: true squareone: true tap: true + ssotap: true From 003ac4ef7e9c67610a16d7e78f47fb0f62ce640a Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Fri, 30 Aug 2024 09:48:54 -0700 Subject: [PATCH 012/193] cm-service: upgrade to v0.1.1 --- applications/cm-service/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cm-service/Chart.yaml b/applications/cm-service/Chart.yaml index ede07c7126..9ea2b7b9f1 100644 --- a/applications/cm-service/Chart.yaml +++ b/applications/cm-service/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.1.0 +appVersion: 0.1.1 description: Campaign Management for Rubin Data Release Production name: cm-service sources: From da60b21b885b4d074f0f82e2e241d28b5d8679f7 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Thu, 22 Aug 2024 10:22:50 -0700 Subject: [PATCH 013/193] Add config for raw microservice to Prompt Processing. The microservice provides an optional way to identify previously downloaded raw images. It is only expected to be deployed for Rubin cameras. --- applications/prompt-proto-service-hsc-gpu/README.md | 1 + applications/prompt-proto-service-hsc-gpu/values.yaml | 4 ++++ applications/prompt-proto-service-hsc/README.md | 1 + applications/prompt-proto-service-hsc/values.yaml | 4 ++++ applications/prompt-proto-service-latiss/README.md | 1 + applications/prompt-proto-service-latiss/values.yaml | 4 ++++ applications/prompt-proto-service-lsstcam/README.md | 1 + applications/prompt-proto-service-lsstcam/values.yaml | 4 ++++ applications/prompt-proto-service-lsstcomcam/README.md | 1 + applications/prompt-proto-service-lsstcomcam/values.yaml | 4 ++++ applications/prompt-proto-service-lsstcomcamsim/README.md | 1 + applications/prompt-proto-service-lsstcomcamsim/values.yaml | 4 ++++ charts/prompt-proto-service/README.md | 1 + .../prompt-proto-service/templates/prompt-proto-service.yaml | 2 ++ charts/prompt-proto-service/values.yaml | 4 ++++ 15 files changed, 37 insertions(+) diff --git a/applications/prompt-proto-service-hsc-gpu/README.md b/applications/prompt-proto-service-hsc-gpu/README.md index e244bf3f12..b97ddaa42a 100644 --- a/applications/prompt-proto-service-hsc-gpu/README.md +++ b/applications/prompt-proto-service-hsc-gpu/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). 
This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-hsc-gpu/values.yaml b/applications/prompt-proto-service-hsc-gpu/values.yaml index 46c7db1e09..b8cc85249d 100644 --- a/applications/prompt-proto-service-hsc-gpu/values.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index 3c10244ada..1d6c810a2c 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
| | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 931f3525b9..3f4b799c67 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 605ee8a88e..17da7029ab 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 410e4e5225..9768a1c05d 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: '0' + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. 
+ raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index abdafa6f39..20834485da 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 6304b41272..6221360a93 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 13cf5a2017..ca625a5b66 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. 
| Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index c4f253abe0..67fc0978a9 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/applications/prompt-proto-service-lsstcomcamsim/README.md b/applications/prompt-proto-service-lsstcomcamsim/README.md index ad995209a9..55d6b814c6 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/README.md +++ b/applications/prompt-proto-service-lsstcomcamsim/README.md @@ -45,6 +45,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. 
| | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-lsstcomcamsim/values.yaml b/applications/prompt-proto-service-lsstcomcamsim/values.yaml index 1cab015e99..47815b63cf 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index b046a4e24b..5f3f2efadb 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -51,6 +51,7 @@ Event-driven processing of camera images | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | | | podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index f6572190d2..690c5b34aa 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -70,6 +70,8 @@ spec: value: {{ .Values.apdb.config }} - name: KAFKA_CLUSTER value: {{ .Values.imageNotifications.kafkaClusterAddress }} + - name: RAW_MICROSERVICE + value: {{ .Values.raw_microservice }} - name: SASQUATCH_URL value: {{ .Values.sasquatch.endpointUrl }} {{- if and .Values.sasquatch.endpointUrl .Values.sasquatch.auth_env }} diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 8c4454ee27..7751ab89ea 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -75,6 +75,10 @@ s3: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 +# -- The URI to a microservice that maps image metadata to a file location. +# If empty, Prompt Processing does not use a microservice. 
+raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set From 65d27fa0f45b4c5e052ff5ccd7c740dd167e96ca Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Thu, 22 Aug 2024 10:28:18 -0700 Subject: [PATCH 014/193] Use raw microservice with LATISS and ComCamSim Prompt Processing. These are the only two instances deployed at present. --- .../values-usdfprod-prompt-processing.yaml | 2 ++ .../values-usdfprod-prompt-processing.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index dafffeab21..b244df2ebc 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -49,6 +49,8 @@ prompt-proto-service: imageBucket: rubin-summit endpointUrl: https://s3dfrgw.slac.stanford.edu + raw_microservice: http://172.24.5.144:8080/presence + imageNotifications: kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing-prod diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml index 9f0fdf75cb..ce3809fb7c 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml @@ -33,6 +33,8 @@ prompt-proto-service: imageBucket: rubin-summit endpointUrl: https://sdfembs3.sdf.slac.stanford.edu + raw_microservice: http://172.24.5.158:8080/presence + imageNotifications: kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 topic: rubin-summit-notification From e6af09ab3e0945414872041f3449681f1f6918ba Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 30 Aug 2024 15:31:59 -0700 Subject: [PATCH 015/193] Increase default flush_interval - Set flush_interval=10s by default to fix warning ["outputs.influxdb"] did not complete within its flush interval --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 5e55c1a59e..31d160897f 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -15,7 +15,7 @@ data: metric_batch_size = {{ default 1000 $value.metric_batch_size }} metric_buffer_limit = {{ default 10000 $value.metric_buffer_limit }} collection_jitter = {{ default "0s" $value.collection_jitter | quote }} - flush_interval = {{ default "1s" $value.flush_interval | quote }} + flush_interval = {{ default "10s" $value.flush_interval | quote }} flush_jitter = {{ default "0s" $value.flush_jitter | quote }} debug = {{ default false $value.debug }} omit_hostname = true From 52e220d0b14851ccbae76823925007de24516595 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 30 Aug 2024 15:33:26 -0700 Subject: [PATCH 016/193] Improve app labels - Use k8s recommended labels, allow to select connector pods using app.kubernetes.io/instance label --- .../telegraf-kafka-consumer/templates/configmap.yaml | 4 +++- 
.../telegraf-kafka-consumer/templates/deployment.yaml | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 31d160897f..8e5e0aa29d 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -6,7 +6,9 @@ kind: ConfigMap metadata: name: sasquatch-telegraf-{{ $key }} labels: - app: sasquatch-telegraf-kakfa-consumer + app.kubernetes.io/name: sasquatch-telegraf + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} + app.kubernetes.io/part-of: sasquatch data: telegraf.conf: |+ [agent] diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml index addd04a6e6..5408f4f93f 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml @@ -6,16 +6,18 @@ kind: Deployment metadata: name: sasquatch-telegraf-{{ $key }} labels: - app: sasquatch-telegraf-kafka-consumer + app.kubernetes.io/name: sasquatch-telegraf + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} + app.kubernetes.io/part-of: sasquatch spec: replicas: {{ default 1 $value.replicaCount }} selector: matchLabels: - app: sasquatch-telegraf-kafka-consumer + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} template: metadata: labels: - app: sasquatch-telegraf-kafka-consumer + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} {{- if $.Values.podAnnotations }} annotations: {{- toYaml $.Values.podAnnotations | nindent 8 }} From 3d63c92d890deef5fbfa8a6f4106b692e203fe42 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 30 Aug 2024 11:47:05 -0700 Subject: [PATCH 017/193] Remove kafka-connect-manager on BTS - These connectors will be replaced by the Telegraf based connectors --- applications/sasquatch/values-base.yaml | 50 ------------------------- 1 file changed, 50 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 0eb4939051..a60f95113f 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -131,56 +131,6 @@ influxdb: enabled: true hostname: base-lsp.lsst.codes -kafka-connect-manager: - influxdbSink: - # Based on the kafka producers configuration for the BTS - # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-base-teststand.yaml - connectors: - auxtel: - enabled: true - topicsRegex: "lsst.sal.ATAOS|lsst.sal.ATDome|lsst.sal.ATDomeTrajectory|lsst.sal.ATHexapod|lsst.sal.ATPneumatics|lsst.sal.ATPtg|lsst.sal.ATMCS" - maintel: - enabled: true - topicsRegex: "lsst.sal.MTAOS|lsst.sal.MTDome|lsst.sal.MTDomeTrajectory|lsst.sal.MTPtg" - mtmount: - enabled: true - topicsRegex: "lsst.sal.MTMount" - tasksMax: "8" - eas: - enabled: true - topicsRegex: "lsst.sal.DIMM|lsst.sal.DSM|lsst.sal.EPM|lsst.sal.ESS|lsst.sal.HVAC|lsst.sal.WeatherForecast" - latiss: - enabled: true - topicsRegex: "lsst.sal.ATCamera|lsst.sal.ATHeaderService|lsst.sal.ATOODS|lsst.sal.ATSpectrograph" - m1m3: - enabled: true - topicsRegex: "lsst.sal.MTM1M3" - tasksMax: "8" - m2: - enabled: true - topicsRegex: "lsst.sal.MTHexapod|lsst.sal.MTM2|lsst.sal.MTRotator" - obssys: - enabled: true - 
topicsRegex: "lsst.sal.Scheduler|lsst.sal.Script|lsst.sal.ScriptQueue|lsst.sal.Watcher" - ocps: - enabled: true - topicsRegex: "lsst.sal.OCPS" - test: - enabled: true - topicsRegex: "lsst.sal.Test" - mtaircompressor: - enabled: true - topicsRegex: "lsst.sal.MTAirCompressor" - lasertracker: - enabled: true - topicsRegex: "lsst.sal.LaserTracker" - genericcamera: - enabled: true - topicsRegex: "lsst.sal.GCHeaderService|lsst.sal.GenericCamera" - lsstcam: - enabled: true - topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" - telegraf-kafka-consumer: enabled: false kafkaConsumers: From f2454340e0e10c7f9f1e59db2f7c68a6eece6f6d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 30 Aug 2024 12:00:59 -0700 Subject: [PATCH 018/193] Review telegraf-based connector configuration at base - Enable telegraf-based connectors - Most of the parameters are now default, so we don't need to repeat them in the configuration - Run 8 connectors "tasks" for M1M3 and MTMount CSCs --- applications/sasquatch/values-base.yaml | 102 ++++-------------------- 1 file changed, 17 insertions(+), 85 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index a60f95113f..f818146427 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -132,146 +132,78 @@ influxdb: hostname: base-lsp.lsst.codes telegraf-kafka-consumer: - enabled: false + enabled: true kafkaConsumers: auxtel: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] maintel: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] mtmount: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" + replicaCount: 8 topicRegexps: | [ "lsst.sal.MTMount" ] eas: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] latiss: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] m1m3: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" + replicaCount: 8 topicRegexps: | [ "lsst.sal.MTM1M3" ] m2: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] obssys: enabled: true - replicaCount: 1 - interval: "1s" 
- flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] ocps: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.OCPS" ] test: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.Test" ] mtaircompressor: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] lasertracker: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.LaserTracker" ] genericcamera: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] lsstcam: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] From 602825f7f885dfd019f514d2d2b17169db0d2be7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 09:44:28 +0000 Subject: [PATCH 019/193] Update Helm release argo-workflows to v0.42.1 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index aa76df123b..285c51625d 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.42.0 + version: 0.42.1 repository: https://argoproj.github.io/argo-helm From 232f6a82fbfae619d0af6396bc4ba15a5a35eeb3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 18:10:59 +0000 Subject: [PATCH 020/193] Update Helm release argo-cd to v7.5.2 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 2956e31a97..81ca4bd156 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 7.4.5 + version: 7.5.2 repository: https://argoproj.github.io/argo-helm From ec653e7527c3e80abfb58403c1b1c3372f26f7bc Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Fri, 31 May 2024 16:04:32 -0700 Subject: [PATCH 021/193] Add ir2 repo to idfdev Butler server We want to use the ir2 repo to test the "hybrid model" where the Butler's database is at Google but the data is at USDF. 
Added a configuration option to Butler to allow it to use two different sets of S3 credentials simultaneously. Added the ir2 repo to data-dev. --- applications/butler/README.md | 1 + applications/butler/secrets.yaml | 6 ++++++ applications/butler/templates/deployment.yaml | 7 +++++++ applications/butler/values-idfdev.yaml | 1 + applications/butler/values.yaml | 5 +++++ 5 files changed, 20 insertions(+) diff --git a/applications/butler/README.md b/applications/butler/README.md index ac80574a22..73d7c17812 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -15,6 +15,7 @@ Server for Butler data abstraction service | autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | +| config.additionalS3ProfileName | string | No second S3 profile is available. | Profile name identifying a second S3 endpoint and set of credentials to use for accessing files in the datastore. | | config.dp02PostgresUri | string | No configuration file for DP02 will be generated. | Postgres connection string pointing to the registry database hosting Data Preview 0.2 data. | | config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | | config.pguser | string | Use values specified in per-repository Butler config files. | Postgres username used to connect to the Butler DB | diff --git a/applications/butler/secrets.yaml b/applications/butler/secrets.yaml index 1b2d88511e..96fc6efe1f 100644 --- a/applications/butler/secrets.yaml +++ b/applications/butler/secrets.yaml @@ -18,3 +18,9 @@ copy: application: nublado key: "postgres-credentials.txt" +"additional-s3-profile": + description: >- + Credentials and endpoint for a second S3 profile to use, in addition to the + default endpoint. For docs on format see + https://github.com/lsst/resources/blob/a34598e125919799d3db4bd8a2363087c3de434e/python/lsst/resources/s3utils.py#L201 + if: additionalS3ProfileName diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index 9ba64a4257..c7e3f06b4c 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -65,6 +65,13 @@ spec: - name: PGUSER value: {{ .Values.config.pguser | quote }} {{ end }} + {{ if .Values.config.additionalS3ProfileName }} + - name: LSST_RESOURCES_S3_PROFILE_{{ .Values.config.additionalS3ProfileName }} + valueFrom: + secretKeyRef: + name: {{ include "butler.fullname" . 
}} + key: additional-s3-profile + {{ end }} volumeMounts: - name: "butler-secrets" mountPath: "/opt/lsst/butler/secrets" diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index 08c73c983f..90d8a9db26 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -6,3 +6,4 @@ config: s3EndpointUrl: "https://storage.googleapis.com" repositories: dp02: "file:///opt/lsst/butler/config/dp02.yaml" + ir2: "s3://butler-us-central1-panda-dev/ir2/butler-ir2.yaml" diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 81264a34cf..18086ea1c0 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -92,6 +92,11 @@ config: # -- URL for the S3 service where files for datasets are stored by Butler. s3EndpointUrl: "" + # -- Profile name identifying a second S3 endpoint and set of credentials + # to use for accessing files in the datastore. + # @default -- No second S3 profile is available. + additionalS3ProfileName: "" + # -- The prefix of the path portion of the URL where the Butler service will # be exposed. For example, if the service should be exposed at # `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` From c7aaad3b94656d01191ab5cec5e6add59e1459f0 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Fri, 31 May 2024 16:24:32 -0700 Subject: [PATCH 022/193] Add conditional secret for Butler idfdev --- applications/butler/secrets.yaml | 2 +- applications/butler/values-idfdev.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/butler/secrets.yaml b/applications/butler/secrets.yaml index 96fc6efe1f..23ee59d217 100644 --- a/applications/butler/secrets.yaml +++ b/applications/butler/secrets.yaml @@ -23,4 +23,4 @@ Credentials and endpoint for a second S3 profile to use, in addition to the default endpoint. 
For docs on format see https://github.com/lsst/resources/blob/a34598e125919799d3db4bd8a2363087c3de434e/python/lsst/resources/s3utils.py#L201 - if: additionalS3ProfileName + if: config.additionalS3ProfileName diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index 90d8a9db26..e70e31b433 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -4,6 +4,7 @@ image: config: dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" + additionalS3ProfileName: "ir2" repositories: dp02: "file:///opt/lsst/butler/config/dp02.yaml" ir2: "s3://butler-us-central1-panda-dev/ir2/butler-ir2.yaml" From af6920938282cd5c57e684f1daf75477a21fa688 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 18:38:15 -0700 Subject: [PATCH 023/193] Update dependencies --- .pre-commit-config.yaml | 4 +- requirements/dev.txt | 438 ++++++++++++++++++++-------------------- requirements/main.txt | 94 +++++---- requirements/tox.txt | 44 ++-- 4 files changed, 294 insertions(+), 286 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 772024b059..3c7869cff9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - -c=.yamllint.yml - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.29.1 + rev: 0.29.2 hooks: - id: check-jsonschema files: ^applications/.*/secrets(-[^./-]+)?\.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.3 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/requirements/dev.txt b/requirements/dev.txt index d1d038b509..8658c4de3d 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -10,6 +10,10 @@ annotated-types==0.7.0 \ # via # -c requirements/main.txt # pydantic +appnope==0.1.4 \ + --hash=sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee \ + --hash=sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c + # via ipykernel asttokens==2.4.1 \ --hash=sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24 \ --hash=sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0 @@ -32,9 +36,9 @@ beautifulsoup4==4.12.3 \ --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed # via pydata-sphinx-theme -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.8.30 \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 # via # -c requirements/main.txt # requests @@ -270,9 +274,9 @@ docutils==0.21.2 \ # sphinx-jinja # sphinx-prompt # sphinxcontrib-bibtex -executing==2.0.1 \ - --hash=sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147 \ - --hash=sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc +executing==2.1.0 \ + --hash=sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf \ + --hash=sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab # via stack-data fastjsonschema==2.20.0 \ 
--hash=sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23 \ @@ -353,12 +357,10 @@ greenlet==3.0.3 \ --hash=sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf \ --hash=sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da \ --hash=sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33 - # via - # -r requirements/dev.in - # sqlalchemy -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 + # via -r requirements/dev.in +idna==3.8 \ + --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ + --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 # via # -c requirements/main.txt # requests @@ -367,9 +369,9 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -importlib-metadata==8.2.0 \ - --hash=sha256:11901fa0c2f97919b288679932bb64febaeacf289d18ac84dd68cb2e74213369 \ - --hash=sha256:72e8d4399996132204f9a16dcc751af254a48f8d1b20b9ff0f98d4a8f901e73d +importlib-metadata==8.4.0 \ + --hash=sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1 \ + --hash=sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5 # via # jupyter-cache # myst-nb @@ -381,9 +383,9 @@ ipykernel==6.29.5 \ --hash=sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5 \ --hash=sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215 # via myst-nb -ipython==8.26.0 \ - --hash=sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c \ - --hash=sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff +ipython==8.27.0 \ + --hash=sha256:0b99a2dc9f15fd68692e898e5568725c6d49c527d36a9fb5960ffbdeaa82ff7e \ + --hash=sha256:f68b3cb8bde357a5d7adc9598d57e22a45dfbea19eb6b98286fa3b288c9cd55c # via # ipykernel # myst-nb @@ -522,34 +524,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.11.1 \ - --hash=sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54 \ - --hash=sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a \ - --hash=sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72 \ - --hash=sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69 \ - --hash=sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b \ - --hash=sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe \ - --hash=sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4 \ - --hash=sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd \ - --hash=sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0 \ - --hash=sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525 \ - --hash=sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2 \ - --hash=sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c \ - --hash=sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5 \ - --hash=sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de \ - 
--hash=sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74 \ - --hash=sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c \ - --hash=sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e \ - --hash=sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58 \ - --hash=sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b \ - --hash=sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417 \ - --hash=sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411 \ - --hash=sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb \ - --hash=sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03 \ - --hash=sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca \ - --hash=sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8 \ - --hash=sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08 \ - --hash=sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809 +mypy==1.11.2 \ + --hash=sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36 \ + --hash=sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce \ + --hash=sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6 \ + --hash=sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b \ + --hash=sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca \ + --hash=sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24 \ + --hash=sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383 \ + --hash=sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7 \ + --hash=sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86 \ + --hash=sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d \ + --hash=sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4 \ + --hash=sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8 \ + --hash=sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987 \ + --hash=sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385 \ + --hash=sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79 \ + --hash=sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef \ + --hash=sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6 \ + --hash=sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70 \ + --hash=sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca \ + --hash=sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70 \ + --hash=sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12 \ + --hash=sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104 \ + --hash=sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a \ + --hash=sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318 \ + --hash=sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1 \ + --hash=sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b \ + --hash=sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -855,116 +857,116 @@ pyyaml==6.0.2 \ # myst-parser # pybtex # sphinxcontrib-redoc 
-pyzmq==26.1.0 \ - --hash=sha256:038ae4ffb63e3991f386e7fda85a9baab7d6617fe85b74a8f9cab190d73adb2b \ - --hash=sha256:05bacc4f94af468cc82808ae3293390278d5f3375bb20fef21e2034bb9a505b6 \ - --hash=sha256:0614aed6f87d550b5cecb03d795f4ddbb1544b78d02a4bd5eecf644ec98a39f6 \ - --hash=sha256:08f74904cb066e1178c1ec706dfdb5c6c680cd7a8ed9efebeac923d84c1f13b1 \ - --hash=sha256:093a1a3cae2496233f14b57f4b485da01b4ff764582c854c0f42c6dd2be37f3d \ - --hash=sha256:0a1f6ea5b1d6cdbb8cfa0536f0d470f12b4b41ad83625012e575f0e3ecfe97f0 \ - --hash=sha256:0e6cea102ffa16b737d11932c426f1dc14b5938cf7bc12e17269559c458ac334 \ - --hash=sha256:263cf1e36862310bf5becfbc488e18d5d698941858860c5a8c079d1511b3b18e \ - --hash=sha256:28a8b2abb76042f5fd7bd720f7fea48c0fd3e82e9de0a1bf2c0de3812ce44a42 \ - --hash=sha256:2ae7c57e22ad881af78075e0cea10a4c778e67234adc65c404391b417a4dda83 \ - --hash=sha256:2cd0f4d314f4a2518e8970b6f299ae18cff7c44d4a1fc06fc713f791c3a9e3ea \ - --hash=sha256:2fa76ebcebe555cce90f16246edc3ad83ab65bb7b3d4ce408cf6bc67740c4f88 \ - --hash=sha256:314d11564c00b77f6224d12eb3ddebe926c301e86b648a1835c5b28176c83eab \ - --hash=sha256:347e84fc88cc4cb646597f6d3a7ea0998f887ee8dc31c08587e9c3fd7b5ccef3 \ - --hash=sha256:359c533bedc62c56415a1f5fcfd8279bc93453afdb0803307375ecf81c962402 \ - --hash=sha256:393daac1bcf81b2a23e696b7b638eedc965e9e3d2112961a072b6cd8179ad2eb \ - --hash=sha256:3b3b8e36fd4c32c0825b4461372949ecd1585d326802b1321f8b6dc1d7e9318c \ - --hash=sha256:3c397b1b450f749a7e974d74c06d69bd22dd362142f370ef2bd32a684d6b480c \ - --hash=sha256:3d3146b1c3dcc8a1539e7cc094700b2be1e605a76f7c8f0979b6d3bde5ad4072 \ - --hash=sha256:3ee647d84b83509b7271457bb428cc347037f437ead4b0b6e43b5eba35fec0aa \ - --hash=sha256:416ac51cabd54f587995c2b05421324700b22e98d3d0aa2cfaec985524d16f1d \ - --hash=sha256:451e16ae8bea3d95649317b463c9f95cd9022641ec884e3d63fc67841ae86dfe \ - --hash=sha256:45cb1a70eb00405ce3893041099655265fabcd9c4e1e50c330026e82257892c1 \ - --hash=sha256:46d6800b45015f96b9d92ece229d92f2aef137d82906577d55fadeb9cf5fcb71 \ - --hash=sha256:471312a7375571857a089342beccc1a63584315188560c7c0da7e0a23afd8a5c \ - --hash=sha256:471880c4c14e5a056a96cd224f5e71211997d40b4bf5e9fdded55dafab1f98f2 \ - --hash=sha256:5384c527a9a004445c5074f1e20db83086c8ff1682a626676229aafd9cf9f7d1 \ - --hash=sha256:57bb2acba798dc3740e913ffadd56b1fcef96f111e66f09e2a8db3050f1f12c8 \ - --hash=sha256:58c33dc0e185dd97a9ac0288b3188d1be12b756eda67490e6ed6a75cf9491d79 \ - --hash=sha256:59d0acd2976e1064f1b398a00e2c3e77ed0a157529779e23087d4c2fb8aaa416 \ - --hash=sha256:5a6ed52f0b9bf8dcc64cc82cce0607a3dfed1dbb7e8c6f282adfccc7be9781de \ - --hash=sha256:5bc2431167adc50ba42ea3e5e5f5cd70d93e18ab7b2f95e724dd8e1bd2c38120 \ - --hash=sha256:5cca7b4adb86d7470e0fc96037771981d740f0b4cb99776d5cb59cd0e6684a73 \ - --hash=sha256:61dfa5ee9d7df297c859ac82b1226d8fefaf9c5113dc25c2c00ecad6feeeb04f \ - --hash=sha256:63c1d3a65acb2f9c92dce03c4e1758cc552f1ae5c78d79a44e3bb88d2fa71f3a \ - --hash=sha256:65c6e03cc0222eaf6aad57ff4ecc0a070451e23232bb48db4322cc45602cede0 \ - --hash=sha256:67976d12ebfd61a3bc7d77b71a9589b4d61d0422282596cf58c62c3866916544 \ - --hash=sha256:68a0a1d83d33d8367ddddb3e6bb4afbb0f92bd1dac2c72cd5e5ddc86bdafd3eb \ - --hash=sha256:6c5aeea71f018ebd3b9115c7cb13863dd850e98ca6b9258509de1246461a7e7f \ - --hash=sha256:754c99a9840839375ee251b38ac5964c0f369306eddb56804a073b6efdc0cd88 \ - --hash=sha256:75a95c2358fcfdef3374cb8baf57f1064d73246d55e41683aaffb6cfe6862917 \ - --hash=sha256:7688653574392d2eaeef75ddcd0b2de5b232d8730af29af56c5adf1df9ef8d6f \ - 
--hash=sha256:77ce6a332c7e362cb59b63f5edf730e83590d0ab4e59c2aa5bd79419a42e3449 \ - --hash=sha256:7907419d150b19962138ecec81a17d4892ea440c184949dc29b358bc730caf69 \ - --hash=sha256:79e45a4096ec8388cdeb04a9fa5e9371583bcb826964d55b8b66cbffe7b33c86 \ - --hash=sha256:7bcbfbab4e1895d58ab7da1b5ce9a327764f0366911ba5b95406c9104bceacb0 \ - --hash=sha256:80b0c9942430d731c786545da6be96d824a41a51742e3e374fedd9018ea43106 \ - --hash=sha256:8b88641384e84a258b740801cd4dbc45c75f148ee674bec3149999adda4a8598 \ - --hash=sha256:8d4dac7d97f15c653a5fedcafa82626bd6cee1450ccdaf84ffed7ea14f2b07a4 \ - --hash=sha256:8d906d43e1592be4b25a587b7d96527cb67277542a5611e8ea9e996182fae410 \ - --hash=sha256:8efb782f5a6c450589dbab4cb0f66f3a9026286333fe8f3a084399149af52f29 \ - --hash=sha256:906e532c814e1d579138177a00ae835cd6becbf104d45ed9093a3aaf658f6a6a \ - --hash=sha256:90d4feb2e83dfe9ace6374a847e98ee9d1246ebadcc0cb765482e272c34e5820 \ - --hash=sha256:911c43a4117915203c4cc8755e0f888e16c4676a82f61caee2f21b0c00e5b894 \ - --hash=sha256:91d1a20bdaf3b25f3173ff44e54b1cfbc05f94c9e8133314eb2962a89e05d6e3 \ - --hash=sha256:94c4262626424683feea0f3c34951d39d49d354722db2745c42aa6bb50ecd93b \ - --hash=sha256:96d7c1d35ee4a495df56c50c83df7af1c9688cce2e9e0edffdbf50889c167595 \ - --hash=sha256:9869fa984c8670c8ab899a719eb7b516860a29bc26300a84d24d8c1b71eae3ec \ - --hash=sha256:98c03bd7f3339ff47de7ea9ac94a2b34580a8d4df69b50128bb6669e1191a895 \ - --hash=sha256:995301f6740a421afc863a713fe62c0aaf564708d4aa057dfdf0f0f56525294b \ - --hash=sha256:998444debc8816b5d8d15f966e42751032d0f4c55300c48cc337f2b3e4f17d03 \ - --hash=sha256:9a6847c92d9851b59b9f33f968c68e9e441f9a0f8fc972c5580c5cd7cbc6ee24 \ - --hash=sha256:9bdfcb74b469b592972ed881bad57d22e2c0acc89f5e8c146782d0d90fb9f4bf \ - --hash=sha256:9f136a6e964830230912f75b5a116a21fe8e34128dcfd82285aa0ef07cb2c7bd \ - --hash=sha256:a0f0ab9df66eb34d58205913f4540e2ad17a175b05d81b0b7197bc57d000e829 \ - --hash=sha256:a4b7a989c8f5a72ab1b2bbfa58105578753ae77b71ba33e7383a31ff75a504c4 \ - --hash=sha256:a7b8aab50e5a288c9724d260feae25eda69582be84e97c012c80e1a5e7e03fb2 \ - --hash=sha256:ad875277844cfaeca7fe299ddf8c8d8bfe271c3dc1caf14d454faa5cdbf2fa7a \ - --hash=sha256:add52c78a12196bc0fda2de087ba6c876ea677cbda2e3eba63546b26e8bf177b \ - --hash=sha256:b10163e586cc609f5f85c9b233195554d77b1e9a0801388907441aaeb22841c5 \ - --hash=sha256:b24079a14c9596846bf7516fe75d1e2188d4a528364494859106a33d8b48be38 \ - --hash=sha256:b281b5ff5fcc9dcbfe941ac5c7fcd4b6c065adad12d850f95c9d6f23c2652384 \ - --hash=sha256:b3bb34bebaa1b78e562931a1687ff663d298013f78f972a534f36c523311a84d \ - --hash=sha256:b45e6445ac95ecb7d728604bae6538f40ccf4449b132b5428c09918523abc96d \ - --hash=sha256:ba0a31d00e8616149a5ab440d058ec2da621e05d744914774c4dde6837e1f545 \ - --hash=sha256:baba2fd199b098c5544ef2536b2499d2e2155392973ad32687024bd8572a7d1c \ - --hash=sha256:bd13f0231f4788db619347b971ca5f319c5b7ebee151afc7c14632068c6261d3 \ - --hash=sha256:bd3f6329340cef1c7ba9611bd038f2d523cea79f09f9c8f6b0553caba59ec562 \ - --hash=sha256:bdeb2c61611293f64ac1073f4bf6723b67d291905308a7de9bb2ca87464e3273 \ - --hash=sha256:bef24d3e4ae2c985034439f449e3f9e06bf579974ce0e53d8a507a1577d5b2ab \ - --hash=sha256:c0665d85535192098420428c779361b8823d3d7ec4848c6af3abb93bc5c915bf \ - --hash=sha256:c5668dac86a869349828db5fc928ee3f58d450dce2c85607067d581f745e4fb1 \ - --hash=sha256:c9b9305004d7e4e6a824f4f19b6d8f32b3578aad6f19fc1122aaf320cbe3dc83 \ - --hash=sha256:ccb42ca0a4a46232d716779421bbebbcad23c08d37c980f02cc3a6bd115ad277 \ - 
--hash=sha256:ce6f2b66799971cbae5d6547acefa7231458289e0ad481d0be0740535da38d8b \ - --hash=sha256:d36b8fffe8b248a1b961c86fbdfa0129dfce878731d169ede7fa2631447331be \ - --hash=sha256:d3dd5523ed258ad58fed7e364c92a9360d1af8a9371e0822bd0146bdf017ef4c \ - --hash=sha256:d416f2088ac8f12daacffbc2e8918ef4d6be8568e9d7155c83b7cebed49d2322 \ - --hash=sha256:d4fafc2eb5d83f4647331267808c7e0c5722c25a729a614dc2b90479cafa78bd \ - --hash=sha256:d5c8b17f6e8f29138678834cf8518049e740385eb2dbf736e8f07fc6587ec682 \ - --hash=sha256:d9270fbf038bf34ffca4855bcda6e082e2c7f906b9eb8d9a8ce82691166060f7 \ - --hash=sha256:dcc37d9d708784726fafc9c5e1232de655a009dbf97946f117aefa38d5985a0f \ - --hash=sha256:ddbb2b386128d8eca92bd9ca74e80f73fe263bcca7aa419f5b4cbc1661e19741 \ - --hash=sha256:e1e5d0a25aea8b691a00d6b54b28ac514c8cc0d8646d05f7ca6cb64b97358250 \ - --hash=sha256:e5c88b2f13bcf55fee78ea83567b9fe079ba1a4bef8b35c376043440040f7edb \ - --hash=sha256:e7eca8b89e56fb8c6c26dd3e09bd41b24789022acf1cf13358e96f1cafd8cae3 \ - --hash=sha256:e8746ce968be22a8a1801bf4a23e565f9687088580c3ed07af5846580dd97f76 \ - --hash=sha256:ec7248673ffc7104b54e4957cee38b2f3075a13442348c8d651777bf41aa45ee \ - --hash=sha256:ecb6c88d7946166d783a635efc89f9a1ff11c33d680a20df9657b6902a1d133b \ - --hash=sha256:ef3b048822dca6d231d8a8ba21069844ae38f5d83889b9b690bf17d2acc7d099 \ - --hash=sha256:f133d05aaf623519f45e16ab77526e1e70d4e1308e084c2fb4cedb1a0c764bbb \ - --hash=sha256:f3292d384537b9918010769b82ab3e79fca8b23d74f56fc69a679106a3e2c2cf \ - --hash=sha256:f774841bb0e8588505002962c02da420bcfb4c5056e87a139c6e45e745c0e2e2 \ - --hash=sha256:f9499c70c19ff0fbe1007043acb5ad15c1dec7d8e84ab429bca8c87138e8f85c \ - --hash=sha256:f99de52b8fbdb2a8f5301ae5fc0f9e6b3ba30d1d5fc0421956967edcc6914242 \ - --hash=sha256:fa25a620eed2a419acc2cf10135b995f8f0ce78ad00534d729aa761e4adcef8a \ - --hash=sha256:fbf558551cf415586e91160d69ca6416f3fce0b86175b64e4293644a7416b81b \ - --hash=sha256:fc82269d24860cfa859b676d18850cbb8e312dcd7eada09e7d5b007e2f3d9eb1 \ - --hash=sha256:ff832cce719edd11266ca32bc74a626b814fff236824aa1aeaad399b69fe6eae +pyzmq==26.2.0 \ + --hash=sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6 \ + --hash=sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a \ + --hash=sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9 \ + --hash=sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f \ + --hash=sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37 \ + --hash=sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc \ + --hash=sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed \ + --hash=sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097 \ + --hash=sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d \ + --hash=sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52 \ + --hash=sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6 \ + --hash=sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6 \ + --hash=sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2 \ + --hash=sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282 \ + --hash=sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3 \ + --hash=sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732 \ + --hash=sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5 \ + 
--hash=sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18 \ + --hash=sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306 \ + --hash=sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f \ + --hash=sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3 \ + --hash=sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b \ + --hash=sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277 \ + --hash=sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a \ + --hash=sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797 \ + --hash=sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca \ + --hash=sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c \ + --hash=sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f \ + --hash=sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5 \ + --hash=sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a \ + --hash=sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44 \ + --hash=sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20 \ + --hash=sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4 \ + --hash=sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8 \ + --hash=sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780 \ + --hash=sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386 \ + --hash=sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5 \ + --hash=sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2 \ + --hash=sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0 \ + --hash=sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971 \ + --hash=sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b \ + --hash=sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50 \ + --hash=sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c \ + --hash=sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f \ + --hash=sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231 \ + --hash=sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c \ + --hash=sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08 \ + --hash=sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5 \ + --hash=sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6 \ + --hash=sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073 \ + --hash=sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e \ + --hash=sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4 \ + --hash=sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317 \ + --hash=sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3 \ + --hash=sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072 \ + --hash=sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad \ + --hash=sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a \ + --hash=sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb \ + --hash=sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd \ + 
--hash=sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f \ + --hash=sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef \ + --hash=sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5 \ + --hash=sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187 \ + --hash=sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711 \ + --hash=sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988 \ + --hash=sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640 \ + --hash=sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c \ + --hash=sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764 \ + --hash=sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1 \ + --hash=sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1 \ + --hash=sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289 \ + --hash=sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb \ + --hash=sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a \ + --hash=sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218 \ + --hash=sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c \ + --hash=sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf \ + --hash=sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7 \ + --hash=sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8 \ + --hash=sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726 \ + --hash=sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9 \ + --hash=sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93 \ + --hash=sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88 \ + --hash=sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115 \ + --hash=sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6 \ + --hash=sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672 \ + --hash=sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2 \ + --hash=sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea \ + --hash=sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc \ + --hash=sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b \ + --hash=sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa \ + --hash=sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003 \ + --hash=sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797 \ + --hash=sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940 \ + --hash=sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db \ + --hash=sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc \ + --hash=sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27 \ + --hash=sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3 \ + --hash=sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e \ + --hash=sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98 \ + --hash=sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b \ + --hash=sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629 \ + 
--hash=sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9 \ + --hash=sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6 \ + --hash=sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec \ + --hash=sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951 \ + --hash=sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae \ + --hash=sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4 \ + --hash=sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6 \ + --hash=sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919 # via # ipykernel # jupyter-client @@ -1089,9 +1091,9 @@ rpds-py==0.20.0 \ # via # jsonschema # referencing -setuptools==72.2.0 \ - --hash=sha256:80aacbf633704e9c8bfa1d99fa5dd4dc59573efcf9e4042c13d3bcef91ac2ef9 \ - --hash=sha256:f11dd94b7bae3a156a95ec151f24e4637fb4fa19c878e4d191bfb8b2d82728c4 +setuptools==74.1.1 \ + --hash=sha256:2353af060c06388be1cecbf5953dcdb1f38362f87a2356c480b6b4d5fcfc8847 \ + --hash=sha256:fc91b5f89e392ef5b77fe143b17e32f65d3024744fba66dc3afe07201684d766 # via documenteer six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1138,9 +1140,9 @@ sphinx==8.0.2 \ # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.2.3 \ - --hash=sha256:b7058e8c5831e5598afca1a78fda0695d3291388d954464a6e480c36198680c0 \ - --hash=sha256:fde3d888949bd0a91207cf1e54afda58121dbb4bf1f183d0cc78a0826654c974 +sphinx-autodoc-typehints==2.3.0 \ + --hash=sha256:3098e2c6d0ba99eacd013eb06861acc9b51c6e595be86ab05c08ee5506ac0c67 \ + --hash=sha256:535c78ed2d6a1bad393ba9f3dfa2602cf424e2631ee207263e07874c38fde084 # via documenteer sphinx-automodapi==0.17.0 \ --hash=sha256:4d029cb79eef29413e94ab01bb0177ebd2d5ba86e9789b73575afe9c06ae1501 \ @@ -1223,56 +1225,56 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer -sqlalchemy==2.0.32 \ - --hash=sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da \ - --hash=sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5 \ - --hash=sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619 \ - --hash=sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78 \ - --hash=sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f \ - --hash=sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389 \ - --hash=sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6 \ - --hash=sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533 \ - --hash=sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9 \ - --hash=sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f \ - --hash=sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d \ - --hash=sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0 \ - --hash=sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c \ - --hash=sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d \ - --hash=sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d \ - --hash=sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632 \ - --hash=sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1 \ - 
--hash=sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8 \ - --hash=sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5 \ - --hash=sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb \ - --hash=sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525 \ - --hash=sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2 \ - --hash=sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c \ - --hash=sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1 \ - --hash=sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b \ - --hash=sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da \ - --hash=sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92 \ - --hash=sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a \ - --hash=sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d \ - --hash=sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16 \ - --hash=sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec \ - --hash=sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84 \ - --hash=sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3 \ - --hash=sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd \ - --hash=sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924 \ - --hash=sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb \ - --hash=sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28 \ - --hash=sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22 \ - --hash=sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4 \ - --hash=sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961 \ - --hash=sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be \ - --hash=sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5 \ - --hash=sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0 \ - --hash=sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e \ - --hash=sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8 \ - --hash=sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8 \ - --hash=sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65 \ - --hash=sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad \ - --hash=sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202 +sqlalchemy==2.0.33 \ + --hash=sha256:06504d9625e3ef114b39803ebca6f379133acad58a87c33117ddc5df66079915 \ + --hash=sha256:06b30bbc43c6dd8b7cdc509cd2e58f4f1dce867565642e1d1a65e32459c89bd0 \ + --hash=sha256:0ea64443a86c3b5a0fd7c93363ad2f9465cb3af61f9920b7c75d1a7bebbeef8a \ + --hash=sha256:1109cc6dc5c9d1223c42186391e6a5509e6d4ab2c30fa629573c10184f742f2e \ + --hash=sha256:17d0c69f66392ad2db1609373a74d1f834b2e632f3f52d446747b8ec220aea53 \ + --hash=sha256:1d81e3aeab456fe24c3f0dcfd4f952a3a5ee45e9c14fc66d34c1d7a60cf7b698 \ + --hash=sha256:2415824ec658891ac38d13a2f36b4ceb2033f034dee1c226f83917589a65f072 \ + --hash=sha256:28c0800c851955f5bd11c0b904638c1343002650d0c071c6fbf0d157cc78627d \ + --hash=sha256:2b1e98507ec2aa200af980d592e936e9dac1c1ec50acc94330ae4b13c55d6fea \ + --hash=sha256:30a3f55be76364b64c83788728faaba782ab282a24909e1994404c2146d39982 \ + 
--hash=sha256:31e56020832be602201fbf8189f379569cf5c3604cdc4ce79f10dbbfcbf8a0eb \ + --hash=sha256:32a4f38d2efca066ec793451ef6852cb0d9086dc3d5479d88a5a25529d1d1861 \ + --hash=sha256:3ad94634338d8c576b1d47a96c798be186650aa5282072053ce2d12c6f309f82 \ + --hash=sha256:3c64d58e83a68e228b1ae6ebac8721241e9d8cc5e0c0dd11ed5d89155477b243 \ + --hash=sha256:454e9b4355f0051063daebc4060140251c19f33fc5d02151c347431860fd104b \ + --hash=sha256:459099ab8dd43a5edbb99f58ba4730baec457df9c06ebc71434c6b4b78cc8cf9 \ + --hash=sha256:49541a43828e273325c520fbacf786615bd974dad63ff60b8ea1e1216e914d1a \ + --hash=sha256:4f1c44c8d66101e6f627f330d8b5b3de5ad25eedb6df3ce39a2e6f92debbcf15 \ + --hash=sha256:523ae689c023cbf0fe1613101254824515193f85f806ba04611dee83302660b5 \ + --hash=sha256:570ec43e8c3c020abac4f0720baa5fe5187334e3f1e8e1777183c041962b61cc \ + --hash=sha256:60c54b677d4f0a0b2df3b79e89e84d601fb931c720176641742efd66b50601f9 \ + --hash=sha256:61e9a2d68a5a8ca6a84cbc79aa7f2e430ae854d3351d6e9ceb3edf6798797b63 \ + --hash=sha256:63b7d9890f7958dabd95cf98a3f48740fbe2bb0493523aef590e82164fa68194 \ + --hash=sha256:67eb8e0ffbebd3d82ec5079ca5f807a661c574b482785483717857c2acab833a \ + --hash=sha256:684aee5fd811091b2f48006fb3fe6c7f2de4a716ef8d294a2aab762099753133 \ + --hash=sha256:751eaafa907a66dd9a328a9d15c3dcfdcba3ef8dd8f7f4a9771cdacdec45d9bf \ + --hash=sha256:77eaf8fdf305266b806a91ae4633edbf86ad37e13bd92ac85e305e7f654c19a5 \ + --hash=sha256:7fd0a28bc24a75326f13735a58272247f65c9e8ee16205eacb2431d6ee94f44a \ + --hash=sha256:816c927dd51e4951d6e79870c945340057a5d8e63543419dee0d247bd67a88f8 \ + --hash=sha256:81759e77a4985abdbac068762a0eaf0f11860fe041ad6da170aae7615ea72531 \ + --hash=sha256:82c72da5be489c8d150deba70d5732398695418df5232bceb52ee323ddd9753b \ + --hash=sha256:8bef11d31a1c48f5943e577d1ef81085ec1550c37552bfc9bf8e5d184ce47142 \ + --hash=sha256:91c93333c2b37ff721dc83b37e28c29de4c502b5612f2d093468037b86aa2be0 \ + --hash=sha256:92249ac94279b8e5f0c0c8420e09b804d0a49d2269f52f549d4cb536c8382434 \ + --hash=sha256:93efa4b72f7cb70555b0f66ee5e113ae40073c57054a72887e50b05bfd97baa4 \ + --hash=sha256:9d035a672d5b3e4793a4a8865c3274a7bbbac7fac67a47b415023b5539105087 \ + --hash=sha256:9e5819822050e6e36e2aa41260d05074c026a1bbb9baa6869170b5ce64db7a4d \ + --hash=sha256:a3926e4ed4a3e956c8b2b0f1140493378c8cd17cad123b4fc1e0f6ecd3e05b19 \ + --hash=sha256:a3da2371628e28ef279f3f756f5e58858fad7820de08508138c9f5f9e4d8f4ac \ + --hash=sha256:ac252bafe8cbadfac7b1e8a74748ffd775e27325186d12b82600b652d9adcb86 \ + --hash=sha256:ae294808afde1b14a1a69aa86a69cadfe391848bbb233a5332a8065e4081cabc \ + --hash=sha256:c40e0213beaf410a151e4329e30c73687838c251c998ba1b312975dbbcb2d05d \ + --hash=sha256:c5d5a733c6af7f392435e673d1b136f6bdf2366033abb35eed680400dc730840 \ + --hash=sha256:c633e2d2f8a7b88c06e276bbe16cb7e62fed815fcbeb69cd9752cea166ecb8e8 \ + --hash=sha256:c9f4f92eee7d06531cc6a5b814e603a0c7639876aab03638dcc70c420a3974f6 \ + --hash=sha256:ca8788dc1baee100f09110f33a01d928cf9df4483d2bfb25a37be31a659d46bb \ + --hash=sha256:d004a623ad4aa8d2eb31b37e65b5e020c9f65a1852b8b9e6301f0e411aca5b9a \ + --hash=sha256:ee2b82b170591ccd19d463c9798a9caeea0cad967a8d2f3264de459f582696d5 \ + --hash=sha256:f7c82a7930126bb5ccfbb73fc1562d52942fbffb2fda2791fab49de249fc202a # via jupyter-cache stack-data==0.6.3 \ --hash=sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9 \ @@ -1286,9 +1288,9 @@ termcolor==2.4.0 \ --hash=sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63 \ --hash=sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a 
# via pytest-sugar -tomlkit==0.13.0 \ - --hash=sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72 \ - --hash=sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264 +tomlkit==0.13.2 \ + --hash=sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde \ + --hash=sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79 # via documenteer tornado==6.4.1 \ --hash=sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8 \ @@ -1390,7 +1392,7 @@ wcwidth==0.2.13 \ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 # via prompt-toolkit -zipp==3.20.0 \ - --hash=sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31 \ - --hash=sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d +zipp==3.20.1 \ + --hash=sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064 \ + --hash=sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b # via importlib-metadata diff --git a/requirements/main.txt b/requirements/main.txt index e4b5e19f13..67830cad11 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -39,9 +39,9 @@ bcrypt==4.2.0 \ --hash=sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221 \ --hash=sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db # via -r requirements/main.in -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.8.30 \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 # via # httpcore # httpx @@ -213,41 +213,41 @@ click==8.1.7 \ # via # -r requirements/main.in # safir -cryptography==43.0.0 \ - --hash=sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709 \ - --hash=sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069 \ - --hash=sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2 \ - --hash=sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b \ - --hash=sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e \ - --hash=sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70 \ - --hash=sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778 \ - --hash=sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22 \ - --hash=sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895 \ - --hash=sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf \ - --hash=sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431 \ - --hash=sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f \ - --hash=sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947 \ - --hash=sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74 \ - --hash=sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc \ - --hash=sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66 \ - --hash=sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66 \ - --hash=sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf \ - --hash=sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f 
\ - --hash=sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5 \ - --hash=sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e \ - --hash=sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f \ - --hash=sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55 \ - --hash=sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1 \ - --hash=sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47 \ - --hash=sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5 \ - --hash=sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0 +cryptography==43.0.1 \ + --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ + --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ + --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ + --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ + --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ + --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ + --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ + --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ + --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ + --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ + --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ + --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ + --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ + --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ + --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ + --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ + --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ + --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ + --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ + --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ + --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ + --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ + --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ + --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ + --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ + --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ + --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 # via # -r requirements/main.in # pyjwt # safir -fastapi==0.112.0 \ - --hash=sha256:3487ded9778006a45834b8c816ec4a48d522e2631ca9e75ec5a774f1b052f821 \ - --hash=sha256:d262bc56b7d101d1f4e8fc0ad2ac75bb9935fec504d2b7117686cec50710cf05 +fastapi==0.112.2 \ + --hash=sha256:3d4729c038414d5193840706907a41839d839523da6ed0c2811f1168cac1798c \ + --hash=sha256:db84b470bd0e2b1075942231e90e3577e12a903c4dc8696f0d206a7904a7af1c # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -279,9 +279,9 @@ hvac==2.3.0 \ 
--hash=sha256:1b85e3320e8642dd82f234db63253cda169a817589e823713dc5fca83119b1e2 \ --hash=sha256:a3afc5710760b6ee9b3571769df87a0333da45da05a5f9f963e1d3925a84be7d # via -r requirements/main.in -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 +idna==3.8 \ + --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ + --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 # via # anyio # requests @@ -531,10 +531,14 @@ rfc3986==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 # via httpx -safir==6.2.0 \ - --hash=sha256:335219abba8ed663395bcf6cf86a60ec8de8412ea212dc0dbe8425e9faa7bc97 \ - --hash=sha256:61cf6fd3839c0945bcc7c01469dc8fcd19351eba33b6022c596684d87763e50e +safir==6.3.0 \ + --hash=sha256:2fcd64bf37dd42eacedd6378341b2487cd06dbaf1f28403301b8d80f60a4fb56 \ + --hash=sha256:6ad7dad520d87d853628849ef95a348c55dbd0180ad3f15c1cf2f7f8fe32f915 # via -r requirements/main.in +safir-logging==6.3.0 \ + --hash=sha256:491dfe85de89a3f2daa29c491a22a0551f0961444490418d91ec50c040ae16eb \ + --hash=sha256:e14754ab0bba6cfa248c3fc4cb5ca28410d97ff3965e831eab6581ed37485e79 + # via safir six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -550,16 +554,18 @@ sniffio==1.3.1 \ # anyio # httpcore # httpx -starlette==0.37.2 \ - --hash=sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee \ - --hash=sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823 +starlette==0.38.4 \ + --hash=sha256:526f53a77f0e43b85f583438aee1a940fd84f8fd610353e8b0c1a77ad8a87e76 \ + --hash=sha256:53a7439060304a208fea17ed407e998f46da5e5d9b1addfea3040094512a6379 # via # fastapi # safir structlog==24.4.0 \ --hash=sha256:597f61e80a91cc0749a9fd2a098ed76715a1c8a01f73e336b746504d1aad7610 \ --hash=sha256:b27bfecede327a6d2da5fbc96bd859f114ecc398a6389d664f62085ee7ae6fc4 - # via safir + # via + # safir + # safir-logging typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 diff --git a/requirements/tox.txt b/requirements/tox.txt index 2c83eef26d..5acb5ec80e 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -1,8 +1,8 @@ # This file was autogenerated by uv via the following command: # uv pip compile --generate-hashes --output-file requirements/tox.txt requirements/tox.in -cachetools==5.4.0 \ - --hash=sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474 \ - --hash=sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827 +cachetools==5.5.0 \ + --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ + --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a # via tox chardet==5.2.0 \ --hash=sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7 \ @@ -57,25 +57,25 @@ tox-uv==1.11.2 \ --hash=sha256:7f8f1737b3277e1cddcb5b89fcc5931d04923562c940ae60f29e140908566df2 \ --hash=sha256:a7aded5c3fb69f055b523357988c1055bb573e91bfd7ecfb9b5233ebcab5d10b # via -r requirements/tox.in -uv==0.2.36 \ - --hash=sha256:083e56a18fc33395aeed4f56a47003e08f2ad9d5039af63ad5b107a241d0e7a3 \ - 
--hash=sha256:139889680c2475afbab61c725df951c4dfa030c42c4eaa8f27d05286c96e8aab \ - --hash=sha256:3f18322decfb0da577e40675620f6e6b9ffe1d8ee1de88a448bfe67fe7884626 \ - --hash=sha256:463a45a34277b9334e500fce463f59408a6bab0c1b5cb3023f25185a4805a562 \ - --hash=sha256:5a3800d2484b989041139ef96b395cec0e4e0a13132584b0147c739063a2494f \ - --hash=sha256:5c8d624975f8355e00ad5f802ed27fcfc7b86d0bd50b57efe24bd665fd3f9a9b \ - --hash=sha256:8753851cc10b0a67e5c5dd29a6f35a072341290cf27a7bb3193ddd92bda19f51 \ - --hash=sha256:8820dd5b77ffcda07dde09712a43d969d39b0aace112d8074c540f19a4911cc2 \ - --hash=sha256:89d3fb3d7a66fa4a4f7c938be0277457fe71179ec4e72758cfe16faec1daa362 \ - --hash=sha256:8e5e2e8e218eb672a3bb57af0ab2d2d3de79119b5dc6b6edb03d349739e474ff \ - --hash=sha256:a08d485cd8eae0352b4765005a4499ad5db073c3534866d68617bbb831ee219a \ - --hash=sha256:a4fddaf0a6a995591042a57ac48557b9b2c1e2c7f09e0f7880f40c34e61f53f8 \ - --hash=sha256:a7961f4d88100fc48129c918545cbb17b9a0d8e3d163c65985e1d1682e056321 \ - --hash=sha256:a837b799e3af1535473b8ab14e414e50f595d547d295879db0d6b0943b7476df \ - --hash=sha256:d093fd10aaf29e85128beaa514f8d37d7374cf1d1a95da966e15788a6fe7d55d \ - --hash=sha256:e36cd4e9c1187d155139b98bcd2cfbfb275f9f601c550fcc38a283983c74f93d \ - --hash=sha256:e79a4cdb3b89b011fafcaa853ebbb9254115f3f7cadbd9141492c48ceeac1b2d \ - --hash=sha256:f1d711629dd8610933687ceea4ad82156ef7b2102c4e9da72afe6c01981f8a1a +uv==0.4.4 \ + --hash=sha256:051589ab42bfdb2997ea61a56a78a2bab0b6ae7d014f96a578dcc5f9d8766757 \ + --hash=sha256:0c9ada2fbfe3ca29c50914acd714fe35100ab56fdb83510d1aadd00d55191d1b \ + --hash=sha256:0d0af47198dc4ca635540b72c933219c6c967885788fd1f651112f168fcade0a \ + --hash=sha256:0d51db6bf89b7b0a4aae229f7efee00fc52a1d7391605f3b789996f9c7986653 \ + --hash=sha256:14f06ed0e0f163c9ec8b26f4fc2df14530080e405d7348ad0c59f9c296c55918 \ + --hash=sha256:3e9139f171329b6fa40a064f9e7923848d44e60bc31da138758695ec34d8200d \ + --hash=sha256:433c69a6e7f35c865172d69e51bf78521a9d87eac6f8772af04667f5d25cc9a9 \ + --hash=sha256:718bbdf0675eab8d15f52041725b60743a9496fde3dc493d34913aa4a15f0a81 \ + --hash=sha256:8ba084d6d5baf92a3cfe41a20fd912dea4e2ea3eca8401f1892394c5c2b79c92 \ + --hash=sha256:918d4da22f9ff4403dc72dfb4c58c994400a64797a3a17f00f5c0d3717d1cb8c \ + --hash=sha256:9ba6abad0a531181bcb90b9af818e2490211f2d4b3eb83eb2a27df1f07f299fb \ + --hash=sha256:c1b7db1db176e46184c974ed30687671ec5d67cfcce34c7ed4a63141ecb6c70e \ + --hash=sha256:d2e2c9db83efd81b0b8dcaa45533b461b058d5aec49a6ed6cc98832e56e45856 \ + --hash=sha256:d66242bba1bbec847b77fcdc7d3191eab733189213a5d2717dbda1ff04e24b46 \ + --hash=sha256:da3a77ad858be5239ae33509ddfeaf097d7bda77fc0b2a42994cbec32cef4769 \ + --hash=sha256:dc881ea11dcb443940bbac5d7601cd7c74f80e7086c2e310e86ebf10d1c8816b \ + --hash=sha256:dd94e5be00a0a06ab5cbba7014720440a12bae73150d8146bc3535f3a22ff069 \ + --hash=sha256:f866f9a44982ef8041a982c3197a17e18d4a8ac7717b4462477ea0ca6a088a52 # via tox-uv virtualenv==20.26.3 \ --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ From 6ad393017c8fb717ec3d5f880e81d3b8ad9b031d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 18:46:51 -0700 Subject: [PATCH 024/193] Add InfluxDB Enterprise - Add configuration for a single data node InfluxDB Enterprise deployment and three meta nodes --- applications/sasquatch/values-summit.yaml | 49 +++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 8c62d1c356..9f1f21a29f 100644 --- 
a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -67,6 +67,55 @@ influxdb: memory: 128Gi cpu: 16 +influxdb-enterprise: + enabled: true + license: + secret: + name: sasquatch + key: influxdb-enterprise-license + meta: + ingress: + enabled: true + hostname: summit-lsp.lsst.codes + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 16Gi + resources: + requests: + memory: 2Gi + cpu: 2 + limits: + memory: 4Gi + cpu: 4 + data: + replicas: 1 + ingress: + enabled: true + hostname: summit-lsp.lsst.codes + persistence: + enabled: true + accessMode: ReadWriteOnce + storageClass: localdrive + size: 15Ti + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - yagan20 + # -- InfluxDB Enterprise data pod resources, 16 cores single node license + resources: + requests: + memory: 256Gi + cpu: 16 + limits: + memory: 256Gi + cpu: 16 + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit From 45440f10b0cc10193f7507f27eee88d2577be181 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 18:49:08 -0700 Subject: [PATCH 025/193] Add secret for InfluxDB Enterprise license --- applications/sasquatch/secrets.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 8634cb3df1..b6648a6eb1 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -81,3 +81,7 @@ camera-password: description: >- camera KafkaUser password. if: strimzi-kafka.users.camera.enabled +influxdb-enterprise-license: + description: >- + InfluxDB Enterprise license. + if: influxdb-enterprise.enabled From 07d81881b22c2bd317b404e0b8aab025dd6b38d4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 4 Sep 2024 05:51:51 -0700 Subject: [PATCH 026/193] Configure key for InfluxDB Enterprise meta shared secret --- applications/sasquatch/README.md | 4 +++- .../sasquatch/charts/influxdb-enterprise/README.md | 4 +++- .../influxdb-enterprise/templates/meta-statefulset.yaml | 4 ++-- .../sasquatch/charts/influxdb-enterprise/values.yaml | 7 ++++++- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 3e53b27e34..2ae35a9f58 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -163,7 +163,9 @@ Rubin Observatory's telemetry service | influxdb-enterprise.meta.service.loadBalancerIP | string | Do not allocate a load balancer IP | Load balancer IP for the meta service | | influxdb-enterprise.meta.service.nodePort | int | Do not allocate a node port | Node port for the meta service | | influxdb-enterprise.meta.service.type | string | `"ClusterIP"` | Service type for the meta service | -| influxdb-enterprise.meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). 
| +| influxdb-enterprise.meta.sharedSecret.secret | object | `{"key":"secret","name":"influxdb-enterprise-shared-secret"}` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). | +| influxdb-enterprise.meta.sharedSecret.secret.key | string | `"secret"` | Key within that secret that contains the shared secret | +| influxdb-enterprise.meta.sharedSecret.secret.name | string | `"influxdb-enterprise-shared-secret"` | Name of the secret containing the shared secret | | influxdb-enterprise.meta.tolerations | list | `[]` | Tolerations for meta pods | | influxdb-enterprise.nameOverride | string | `""` | Override the base name for resources | | influxdb-enterprise.serviceAccount.annotations | object | `{}` | Annotations to add to the service account | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index aba97b90d0..12233edf75 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -92,7 +92,9 @@ Run InfluxDB Enterprise on Kubernetes | meta.service.loadBalancerIP | string | Do not allocate a load balancer IP | Load balancer IP for the meta service | | meta.service.nodePort | int | Do not allocate a node port | Node port for the meta service | | meta.service.type | string | `"ClusterIP"` | Service type for the meta service | -| meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). | +| meta.sharedSecret.secret | object | `{"key":"secret","name":"influxdb-enterprise-shared-secret"}` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). 
| +| meta.sharedSecret.secret.key | string | `"secret"` | Key within that secret that contains the shared secret | +| meta.sharedSecret.secret.name | string | `"influxdb-enterprise-shared-secret"` | Name of the secret containing the shared secret | | meta.tolerations | list | `[]` | Tolerations for meta pods | | nameOverride | string | `""` | Override the base name for resources | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml index beff940f34..cf543c32a4 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml @@ -59,8 +59,8 @@ spec: - name: INFLUXDB_META_INTERNAL_SHARED_SECRET valueFrom: secretKeyRef: - name: {{ .Values.meta.sharedSecret.secretName }} - key: secret + name: {{ .Values.meta.sharedSecret.secret.name }} + key: {{ .Values.meta.sharedSecret.secret.key }} {{- if .Values.meta.env }} {{ toYaml .Values.meta.env | indent 12 }} {{- end}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 412b131f72..0709b449c6 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -137,7 +137,12 @@ meta: # InfluxDB nodes. Must have a key named `secret` that should be a long, # random string See [documentation for # shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). - secretName: influxdb-enterprise-shared-secret + secret: + # -- Name of the secret containing the shared secret + name: influxdb-enterprise-shared-secret + + # -- Key within that secret that contains the shared secret + key: secret service: # -- Service type for the meta service From 0cd6fe6251bee74474cde6a25b4daf6b4b1976ac Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 4 Sep 2024 05:52:33 -0700 Subject: [PATCH 027/193] Add InfluxDB Enterprise shared secret --- applications/sasquatch/secrets.yaml | 4 ++++ applications/sasquatch/values-summit.yaml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index b6648a6eb1..7f84437a65 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -85,3 +85,7 @@ influxdb-enterprise-license: description: >- InfluxDB Enterprise license. if: influxdb-enterprise.enabled +influxdb-enterprise-shared-secret: + description: >- + InfluxDB Enterprise shared secret. 
+ if: influxdb-enterprise.enabled diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 9f1f21a29f..972f8f4dd5 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -81,6 +81,10 @@ influxdb-enterprise: enabled: true accessMode: ReadWriteOnce size: 16Gi + sharedSecret: + secret: + name: sasquatch + key: influxdb-enterprise-shared-secret resources: requests: memory: 2Gi From 6bdc24fdc0e2da683eb6a07de65c543d2ace79a9 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 4 Sep 2024 06:29:39 -0700 Subject: [PATCH 028/193] Configure connectors for InfluxDB enterprise - InfluxDB OSS and Enterprise will run simultaneously at the Summit for a while - Create a second instance of connectors to write to InfluxDB Enterprise --- applications/sasquatch/Chart.yaml | 4 + applications/sasquatch/README.md | 36 +++++ applications/sasquatch/values-summit.yaml | 173 +++++++++++++++++++++- 3 files changed, 210 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index 93bb8ee863..600032104e 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -46,6 +46,10 @@ dependencies: - name: telegraf-kafka-consumer condition: telegraf-kafka-consumer.enabled version: 1.0.0 + - name: telegraf-kafka-consumer + alias: telegraf-kafka-consumer-oss + condition: telegraf-kafka-consumer-oss.enabled + version: 1.0.0 - name: rest-proxy condition: rest-proxy.enabled version: 1.0.0 diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 2ae35a9f58..2e8e8fca70 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -434,3 +434,39 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | | telegraf-kafka-consumer.resources | object | See `values.yaml` | Kubernetes resources requests and limits | | telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment | +| telegraf-kafka-consumer-oss.affinity | object | `{}` | Affinity for pod assignment | +| telegraf-kafka-consumer-oss.args | list | `[]` | Arguments passed to the Telegraf agent containers | +| telegraf-kafka-consumer-oss.enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | +| telegraf-kafka-consumer-oss.env | list | See `values.yaml` | Telegraf agent enviroment variables | +| telegraf-kafka-consumer-oss.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| telegraf-kafka-consumer-oss.image.pullPolicy | string | `"Always"` | Image pull policy | +| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer-oss.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| telegraf-kafka-consumer-oss.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| telegraf-kafka-consumer-oss.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | +| telegraf-kafka-consumer-oss.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. 
Each plugin will sleep for a random time within jitter before collecting. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.precision | string | "1us" | Data precision. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.tags | list | `[]` | List of Avro fields to be recorded as InfluxDB tags. The Avro fields specified as tags will be converted to strings before ingestion into InfluxDB. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.timestamp_field | string | `"private_efdStamp"` | Avro field to be used as the InfluxDB timestamp (optional). If unspecified or set to the empty string, Telegraf will use the time it received the measurement. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.timestamp_format | string | `"unix"` | Timestamp format. Possible values are `unix` (the default if unset) a timestamp in seconds since the Unix epoch, `unix_ms` (milliseconds), `unix_us` (microsseconds), or `unix_ns` (nanoseconds). | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.union_field_separator | string | `""` | Union field separator: if a single Avro field is flattened into more than one InfluxDB field (e.g. an array `a`, with four members, would yield `a0`, `a1`, `a2`, `a3`; if the field separator were `_`, these would be `a_0`...`a_3`. 
| +| telegraf-kafka-consumer-oss.kafkaConsumers.test.union_mode | string | `"nullable"` | Union mode: this can be one of `flatten`, `nullable`, or `any`. See `values.yaml` for extensive discussion. | +| telegraf-kafka-consumer-oss.nodeSelector | object | `{}` | Node labels for pod assignment | +| telegraf-kafka-consumer-oss.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| telegraf-kafka-consumer-oss.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| telegraf-kafka-consumer-oss.resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| telegraf-kafka-consumer-oss.tolerations | list | `[]` | Tolerations for pod assignment | diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 972f8f4dd5..5c82488f2a 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -203,8 +203,52 @@ kafka-connect-manager: repairerConnector: false topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" +telegraf-kafka-consumer-oss: + enabled: true + kafkaConsumers: + oss-backpack: + enabled: true + replicaCount: 1 + database: "lsst.backpack" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.backpack" ] + oss-atcamera: + enabled: true + replicaCount: 1 + database: "lsst.ATCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] + topicRegexps: | + [ "lsst.ATCamera" ] + oss-cccamera: + enabled: true + replicaCount: 1 + database: "lsst.CCCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] + topicRegexps: | + [ "lsst.CCCamera" ] + oss-mtcamera: + enabled: true + replicaCount: 1 + database: "lsst.MTCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] + topicRegexps: | + [ "lsst.MTCamera" ] + telegraf-kafka-consumer: enabled: true + influxdb: + url: "http://sasquatch-influxdb-enterprise-data.sasquatch:8086" kafkaConsumers: backpack: enabled: true @@ -214,9 +258,134 @@ telegraf-kafka-consumer: timestamp_field: "timestamp" topicRegexps: | [ "lsst.backpack" ] + # CSC connectors + maintel: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + offset: "newest" + mtmount: + enabled: true + database: "efd" + replicaCount: 8 + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTMount" ] + offset: "newest" + comcam: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] + offset: "newest" + eas: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + offset: "newest" + m1m3: + enabled: true + database: "efd" + replicaCount: 8 + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTM1M3" ] + offset: 
"newest" + m2: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + offset: "newest" + obssys: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + offset: "newest" + ocps: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.OCPS" ] + offset: "newest" + pmd: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.PMD" ] + offset: "newest" + calsys: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] + offset: "newest" + mtaircompressor: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTAirCompressor" ] + offset: "newest" + genericcamera: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + offset: "newest" + gis: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.GIS" ] + offset: "newest" + lsstcam: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + offset: "newest" + auxtel: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + latiss: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + test: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.Test" ] + lasertracker: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.LaserTracker" ] + # CCS connectors (experimental) data is being written on separate databases for now atcamera: enabled: true - replicaCount: 1 database: "lsst.ATCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" @@ -226,7 +395,6 @@ telegraf-kafka-consumer: [ "lsst.ATCamera" ] cccamera: enabled: true - replicaCount: 1 database: "lsst.CCCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" @@ -236,7 +404,6 @@ telegraf-kafka-consumer: [ "lsst.CCCamera" ] mtcamera: enabled: true - replicaCount: 1 database: "lsst.MTCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" From d1bde581396ac96e903a61f7c9a714e8c502e254 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Mon, 19 Aug 2024 12:44:03 -0700 Subject: [PATCH 029/193] Split Prompt Processing pipelines into preload and prompt subsets. The two subsets will now be run separately. 
--- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfprod-prompt-processing.yaml | 4 ++-- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfprod-prompt-processing.yaml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml index 6ecda4e3cb..fe819556fd 100644 --- a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml @@ -13,7 +13,7 @@ prompt-proto-service: instrument: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index 987d3b4e9f..f507024096 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: instrument: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index c9f5d5677e..3c34271230 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: main: >- (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index b244df2ebc..5115052b25 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -34,8 +34,8 @@ prompt-proto-service: (survey="BLOCK-295")=[] (survey="")=[] preprocessing: >- - (survey="AUXTEL_PHOTO_IMAGING")=[] - (survey="AUXTEL_DRP_IMAGING")=[] + (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] + (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] (survey="BLOCK-T17")=[] (survey="cwfs")=[] (survey="cwfs-focus-sweep")=[] diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml index df319a9054..86f51c8ce7 100644 --- 
a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml @@ -15,7 +15,7 @@ prompt-proto-service: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml index ce3809fb7c..2195736e75 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml @@ -25,7 +25,7 @@ prompt-proto-service: ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] (survey="")=[] preprocessing: >- - (survey="BLOCK-297")=[] + (survey="BLOCK-297")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml] (survey="")=[] calibRepo: s3://rubin-summit-users From 74e846aed8d1b34ef1b3be19ecaa96ce5fe9aa5f Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 3 Sep 2024 17:47:01 -0400 Subject: [PATCH 030/193] love: refactor configurations to split managers producers by categories on BTS --- applications/love/README.md | 105 ++++---- .../love/charts/love-manager/README.md | 103 ++++---- .../love-manager/templates/_helpers.tpl | 12 +- .../manager-producers-deployment.yaml | 49 ++-- .../templates/manager-producers-hpa.yaml | 34 +-- .../templates/manager-producers-service.yaml | 12 +- .../love/charts/love-manager/values.yaml | 240 ++++++++++-------- .../love/charts/love-producer/README.md | 2 +- .../love-producer/templates/deployment.yaml | 2 + .../love/charts/love-producer/values.yaml | 3 +- applications/love/values-base.yaml | 234 ++++++++++++++++- 11 files changed, 537 insertions(+), 259 deletions(-) diff --git a/applications/love/README.md b/applications/love/README.md index ac03b1cff6..fae75a25ca 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -17,6 +17,7 @@ Deployment for the LSST Operators Visualization Environment | global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| love-manager.manager | object | 
`{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. 
Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | love-manager.manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | love-manager.manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | love-manager.manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -66,55 +67,59 @@ Deployment for the LSST Operators Visualization Environment | love-manager.manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | | love-manager.manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | | love-manager.manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | -| love-manager.manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | -| love-manager.manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| love-manager.manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| love-manager.manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| love-manager.manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| love-manager.manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| love-manager.manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| love-manager.manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| love-manager.manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| love-manager.manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| love-manager.manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| love-manager.manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| love-manager.manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| love-manager.manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | -| love-manager.manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| love-manager.manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | -| love-manager.manager.producers.env.DB_PORT | int | `5432` | The port for the database service | -| love-manager.manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | -| love-manager.manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | -| love-manager.manager.producers.env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | -| love-manager.manager.producers.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | -| love-manager.manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| love-manager.manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| love-manager.manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | -| love-manager.manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | -| love-manager.manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| love-manager.manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| love-manager.manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| love-manager.manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | -| love-manager.manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | -| love-manager.manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | -| love-manager.manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | -| love-manager.manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | -| love-manager.manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | -| love-manager.manager.producers.envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | -| love-manager.manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | -| love-manager.manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | -| love-manager.manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | -| love-manager.manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | -| love-manager.manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | -| love-manager.manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | -| love-manager.manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | -| love-manager.manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | -| love-manager.manager.producers.ports.container | int | `8000` | The port on the container for normal communications | -| love-manager.manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | -| love-manager.manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | -| love-manager.manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | -| love-manager.manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | -| love-manager.manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.manager.producers[0] | object | `{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}` | Example producer configuration. Each producer should follow the same structure as frontend with the added name field. 
| +| love-manager.manager.producers[0].affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| love-manager.manager.producers[0].autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.producers[0].autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.producers[0].autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.producers[0].autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.producers[0].autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.producers[0].autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.producers[0].autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.producers[0].env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.producers[0].env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.producers[0].env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.producers[0].env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers[0].env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers[0].env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| love-manager.manager.producers[0].env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.producers[0].env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| love-manager.manager.producers[0].env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.producers[0].env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| love-manager.manager.producers[0].env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| love-manager.manager.producers[0].env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | +| love-manager.manager.producers[0].env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | +| love-manager.manager.producers[0].env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.producers[0].env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.producers[0].env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.producers[0].env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.producers[0].env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.producers[0].env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.producers[0].env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.producers[0].env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.manager.producers[0].envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| love-manager.manager.producers[0].envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| love-manager.manager.producers[0].envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| love-manager.manager.producers[0].envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| love-manager.manager.producers[0].envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.producers[0].envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | +| love-manager.manager.producers[0].envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| love-manager.manager.producers[0].envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.producers[0].envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| love-manager.manager.producers[0].envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| love-manager.manager.producers[0].image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.producers[0].image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| love-manager.manager.producers[0].image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| love-manager.manager.producers[0].nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| love-manager.manager.producers[0].ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers[0].ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.manager.producers[0].readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| love-manager.manager.producers[0].replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| love-manager.manager.producers[0].resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| love-manager.manager.producers[0].tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.manager.producers_ports | object | `{"container":8000,"node":30000}` | Configuration for the producers ports. this is a single configuration for all the producers. | +| love-manager.manager.producers_ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers_ports.node | int | `30000` | The port on the node for normal communcations | | love-manager.namespace | string | `"love"` | The overall namespace for the application | | love-manager.redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | love-manager.redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | @@ -170,7 +175,7 @@ Deployment for the LSST Operators Visualization Environment | love-nginx.tolerations | list | `[]` | Toleration specifications for the NGINX pod | | love-producer.affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | | love-producer.annotations | object | `{}` | This allows for the specification of pod annotations. 
| -| love-producer.env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| love-producer.env | object | `{}` | This section holds a set of key, value pairs for environmental variables | | love-producer.envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | | love-producer.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | | love-producer.image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index 8db2596b51..47a93da5c5 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -6,6 +6,7 @@ Helm chart for the LOVE manager service. | Key | Type | Default | Description | |-----|------|---------|-------------| +| manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PAS
SWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -55,55 +56,59 @@ Helm chart for the LOVE manager service. 
| manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | | manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | | manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | -| manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | -| manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | -| manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | -| manager.producers.env.DB_PORT | int | `5432` | The port for the database service | -| manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | -| manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | -| manager.producers.env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | -| manager.producers.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | -| manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | -| manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | -| manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | -| manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | -| manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | -| manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | -| manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | -| manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | -| manager.producers.envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | -| manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | -| manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | -| manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | -| manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | -| manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | -| manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | -| manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | -| manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | -| manager.producers.ports.container | int | `8000` | The port on the container for normal communications | -| manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | -| manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | -| manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | -| manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | -| manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| manager.producers[0] | object | `{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}` | Example producer configuration. Each producer should follow the same structure as frontend with the added name field. 
| +| manager.producers[0].affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| manager.producers[0].autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| manager.producers[0].autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| manager.producers[0].autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| manager.producers[0].autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| manager.producers[0].autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| manager.producers[0].autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| manager.producers[0].autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| manager.producers[0].env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| manager.producers[0].env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| manager.producers[0].env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| manager.producers[0].env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.producers[0].env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.producers[0].env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| manager.producers[0].env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| manager.producers[0].env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| manager.producers[0].env.DB_PORT | int | `5432` | The port for the database service | +| manager.producers[0].env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| manager.producers[0].env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| manager.producers[0].env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | +| manager.producers[0].env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | +| manager.producers[0].env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| manager.producers[0].env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| manager.producers[0].env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| manager.producers[0].env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| manager.producers[0].env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| manager.producers[0].env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| manager.producers[0].env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| manager.producers[0].env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| manager.producers[0].envSecrets.ADMIN_USER_PASS | string | 
`"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| manager.producers[0].envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| manager.producers[0].envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| manager.producers[0].envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| manager.producers[0].envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| manager.producers[0].envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | +| manager.producers[0].envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| manager.producers[0].envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | +| manager.producers[0].envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| manager.producers[0].envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| manager.producers[0].image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| manager.producers[0].image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| manager.producers[0].image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| manager.producers[0].nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| manager.producers[0].ports.container | int | `8000` | The port on the container for normal communications | +| manager.producers[0].ports.node | int | `30000` | The port on the node for normal communcations | +| manager.producers[0].readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| manager.producers[0].replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| manager.producers[0].resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| manager.producers[0].tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| manager.producers_ports | object | `{"container":8000,"node":30000}` | Configuration for the producers ports. this is a single configuration for all the producers. 
| +| manager.producers_ports.container | int | `8000` | The port on the container for normal communications | +| manager.producers_ports.node | int | `30000` | The port on the node for normal communcations | | namespace | string | `"love"` | The overall namespace for the application | | redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl index 13e1c5bcec..f95f771b7b 100644 --- a/applications/love/charts/love-manager/templates/_helpers.tpl +++ b/applications/love/charts/love-manager/templates/_helpers.tpl @@ -33,8 +33,8 @@ Manager frontend fullname {{/* Manager producers fullname */}} -{{- define "love-manager-producers.fullname" -}} -{{ include "love-manager.fullname" . }}-producers +{{- define "love-manager-producer.fullname" -}} +{{ include "love-manager.fullname" . }}-producer {{- end }} {{/* @@ -63,9 +63,9 @@ helm.sh/chart: {{ include "love-manager.chart" . }} {{/* Manager Producers Common labels */}} -{{- define "love-manager-producers.labels" -}} +{{- define "love-manager-producer.labels" -}} helm.sh/chart: {{ include "love-manager.chart" . }} -{{ include "love-manager-producers.selectorLabels" . }} +{{ include "love-manager-producer.selectorLabels" . }} {{- end }} {{/* @@ -87,9 +87,9 @@ app.kubernetes.io/instance: {{ include "love-manager.name" . }}-frontend {{/* Manager Producers Selector labels */}} -{{- define "love-manager-producers.selectorLabels" -}} +{{- define "love-manager-producer.selectorLabels" -}} app.kubernetes.io/name: {{ include "love-manager.name" . }} -app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producers +app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producer {{- end }} {{/* diff --git a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml index 308f2eb69b..855fe7d4d9 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml @@ -1,55 +1,62 @@ +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "love-manager-producers.fullname" . }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: - {{- include "love-manager-producers.labels" . | nindent 4 }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} spec: selector: matchLabels: - {{- include "love-manager-producers.selectorLabels" . | nindent 6 }} - {{- if not .Values.manager.producers.autoscaling.enabled }} - replicas: {{ .Values.manager.producers.replicas }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} + {{- if not $manager_producer.autoscaling.enabled }} + replicas: {{ $manager_producer.replicas }} {{- end }} template: metadata: labels: - {{- include "love-manager-producers.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} spec: containers: - - name: {{ include "love-manager-producers.fullname" . }} - {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystem.imageTag }} - image: "{{ .Values.manager.producers.image.repository }}:{{ $imageTag }}" - imagePullPolicy: {{ .Values.manager.producers.image.pullPolicy }} + - name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + {{- $imageTag := $manager_producer.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "{{ $manager_producer.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ $manager_producer.image.pullPolicy }} ports: - - containerPort: {{ .Values.manager.producers.ports.container }} + - containerPort: {{ $.Values.manager.producers_ports.container }} env: - {{- $data := dict "env" .Values.manager.producers.env "secret" false }} + {{- $data := dict "env" $manager_producer.env "secret" false }} {{- include "helpers.envFromList" $data | indent 10 }} - {{- if .Values.manager.producers.envSecrets }} - {{- $data := dict "secret" true "env" .Values.manager.producers.envSecrets }} + {{- if $manager_producer.envSecrets }} + {{- $data := dict "secret" true "env" $manager_producer.envSecrets }} {{- include "helpers.envFromList" $data | indent 10 }} {{- end }} - {{- with $.Values.manager.producers.resources }} + {{- with $manager_producer.resources }} resources: - {{- toYaml $.Values.manager.producers.resources | nindent 10 }} + {{- toYaml $manager_producer.resources | nindent 10 }} {{- end }} - {{- with $.Values.manager.producers.readinessProbe }} + {{- with $manager_producer.readinessProbe }} readinessProbe: - {{- toYaml $.Values.manager.producers.readinessProbe | nindent 10 }} + {{- toYaml $manager_producer.readinessProbe | nindent 10 }} {{- end }} imagePullSecrets: - name: pull-secret - {{- with $.Values.manager.producers.nodeSelector }} + {{- with $manager_producer.nodeSelector }} nodeSelector: {{- toYaml $ | nindent 8 }} {{- end }} - {{- with $.Values.manager.producers.affinity }} + {{- with $manager_producer.affinity }} affinity: {{- toYaml $ | nindent 8 }} {{- end }} - {{- with $.Values.manager.producers.tolerations }} + {{- with $manager_producer.tolerations }} tolerations: {{- toYaml $ | nindent 8 }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml index a44422835b..238c66f21c 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml @@ -1,47 +1,51 @@ -{{- if .Values.manager.producers.autoscaling.enabled }} +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- +{{- if $manager_producer.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "love-manager-producers.fullname" . }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} labels: - {{- include "love-manager-producers.labels" . 
| nindent 4 }} + {{- include "love-manager-producer.labels" $ | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "love-manager-producers.fullname" . }} - minReplicas: {{ .Values.manager.producers.autoscaling.minReplicas }} - maxReplicas: {{ .Values.manager.producers.autoscaling.maxReplicas }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + minReplicas: {{ $manager_producer.autoscaling.minReplicas }} + maxReplicas: {{ $manager_producer.autoscaling.maxReplicas }} metrics: - {{- if .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + {{- if $manager_producer.autoscaling.targetCPUUtilizationPercentage }} - type: Resource resource: name: cpu target: type: Utilization - averageUtilization: {{ .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + averageUtilization: {{ $manager_producer.autoscaling.targetCPUUtilizationPercentage }} {{- end }} - {{- if .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + {{- if $manager_producer.autoscaling.targetMemoryUtilizationPercentage }} - type: Resource resource: name: memory target: type: Utilization - averageUtilization: {{ .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + averageUtilization: {{ $manager_producer.autoscaling.targetMemoryUtilizationPercentage }} {{- end }} - {{- if or .Values.manager.producers.autoscaling.scaleUpPolicy .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- if or $manager_producer.autoscaling.scaleUpPolicy $manager_producer.autoscaling.scaleDownPolicy }} behavior: - {{- if .Values.manager.producers.autoscaling.scaleUpPolicy }} + {{- if $manager_producer.autoscaling.scaleUpPolicy }} scaleUp: - {{- with .Values.manager.producers.autoscaling.scaleUpPolicy }} + {{- with $manager_producer.autoscaling.scaleUpPolicy }} {{- toYaml . | nindent 6 }} {{- end }} {{- end }} - {{- if .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- if $manager_producer.autoscaling.scaleDownPolicy }} scaleDown: - {{- with .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- with $manager_producer.autoscaling.scaleDownPolicy }} {{- toYaml . | nindent 6 }} {{- end }} {{- end }} {{- end }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/templates/manager-producers-service.yaml b/applications/love/charts/love-manager/templates/manager-producers-service.yaml index bf90a53f9b..1195507e30 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-service.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-service.yaml @@ -1,10 +1,14 @@ +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- apiVersion: v1 kind: Service metadata: - name: {{ include "love-manager-producers.fullname" . }}-service - namespace: {{ .Values.namespace }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }}-service + namespace: {{ $.Values.namespace }} spec: selector: - app.kubernetes.io/instance: {{ include "love-manager-producers.fullname" . 
}} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} ports: - - port: {{ .Values.manager.producers.ports.container }} + - port: {{ $.Values.manager.producers_ports.container }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 391b5c51e7..d5534ee77c 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -1,5 +1,21 @@ # -- The overall namespace for the application namespace: love +# -- Configuration for the different manager instances. +# This is divided into two sessions; frontend and producers. +# _frontend_ Configuration for the manager frontend. +# The frontend session defines the configuration for the +# so-called frontend managers. These serves the frontend artifacts +# as well as handles the data piping from the system to the frontend. +# Every time a user opens a view in LOVE the page will connect to the +# frontend manager and will receive the telemetry data from the system. +# Once a connection is established between a frontend and the manager it +# is kept alive. As more connections come in, the autoscaler will scale +# up the number of frontend managers and new connections should be redirected +# to them. The redirect is handled by the manager-frontend-service ClusterIP. +# _producers_ Configurations for the manger producers. +# This is basically a list of managers (with the same structure as the +# frontend, but in a list). These defines services that the LOVE-producers +# connect to, to feed data from the control system. manager: frontend: image: @@ -110,113 +126,123 @@ manager: # -- Configuration for the LOVE manager frontend pods readiness probe readinessProbe: {} producers: - image: - # -- The LOVE manager producers image to use - repository: lsstts/love-manager - # -- The pull policy on the LOVE manager producers image - pullPolicy: IfNotPresent - # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled - nexus3: "" - ports: - # -- The port on the container for normal communications - container: 8000 - # -- The port on the node for normal communcations - node: 30000 - env: - # -- The site tag where LOVE is being run - LOVE_SITE: local - # -- The external URL from the NGINX server for LOVE - SERVER_URL: love.lsst.local - # -- The Kubernetes sub-path for LOVE - URL_SUBPATH: /love - # -- Set the manager to use LFA storage - REMOTE_STORAGE: true - # -- Set the hostname for the Jira instance - JIRA_API_HOSTNAME: rubinobs.atlassian.net - # -- Set the Jira project ID - JIRA_PROJECT_ID: 10063 - # -- Set the URL for the OLE instance - OLE_API_HOSTNAME: site.lsst.local - # -- Set the URI for the 1st LDAP server - AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local - # -- Set the URI for the 2nd LDAP server - AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local - # -- Set the URI for the 3rd LDAP server - AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local - # -- Have the LOVE producer managers not query commander - HEARTBEAT_QUERY_COMMANDER: false - # -- Label for the LOVE commander service. - # Must match the one spcified in the LOVE commander chart - COMMANDER_HOSTNAME: love-commander-service - # -- Port number for the LOVE commander service. 
- # Must match the one spcified in the LOVE commander chart - COMMANDER_PORT: 5000 - # -- The type of database engine being used for the LOVE manager producers - DB_ENGINE: postgresql - # -- The name of the database being used for the LOVE manager producers - DB_NAME: love - # -- The database user needed for access from the LOVE manager producers - DB_USER: love - # -- The name of the database service - DB_HOST: love-manager-database-service - # -- The port for the database service - DB_PORT: 5432 - # -- The name of the redis service - REDIS_HOST: love-manager-redis-service - # -- The expiration time for the redis service - REDIS_CONFIG_EXPIRY: 5 - # -- The connection capacity for the redis service - REDIS_CONFIG_CAPACITY: 5000 - envSecrets: - # -- The LOVE manager producers secret secret key name - SECRET_KEY: manager-secret-key - # -- The LOVE manager producers process connection password secret key name - PROCESS_CONNECTION_PASS: process-connection-pass - # -- The LOVE manager producers admin user password secret key name - ADMIN_USER_PASS: admin-user-pass - # -- The LOVE manager producers user user password secret key name - USER_USER_PASS: user-user-pass - # -- The LOVE manager producers cmd_user user password secret key name - CMD_USER_PASS: cmd-user-pass - # -- The LOVE manager producers authlist_user password secret key name - AUTHLIST_USER_PASS: authlist-user-pass - # -- The LOVE manager producers LDAP binding password secret key name - AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password - # -- The database password secret key name. - # Must match `database.envSecrets.POSTGRES_PASSWORD` - DB_PASS: db-pass - # -- The redis password secret key name. - # Must match `redis.envSecrets.REDIS_PASS` - REDIS_PASS: redis-pass - # -- The LOVE manager jira API token secret key name - JIRA_API_TOKEN: jira-api-token - # -- Set the default number of LOVE manager producers pod replicas - replicas: 1 - autoscaling: - # -- Whether automatic horizontal scaling is active - enabled: true - # -- The allowed minimum number of replicas - minReplicas: 1 - # -- The allowed maximum number of replicas - maxReplicas: 100 - # -- The percentage of CPU utilization that will trigger the scaling - targetCPUUtilizationPercentage: 80 - # -- (int) The percentage of memory utilization that will trigger the scaling - targetMemoryUtilizationPercentage: "" - # -- Policy for scaling up manager pods - scaleUpPolicy: {} - # -- Policy for scaling down manager pods - scaleDownPolicy: {} - # -- Resource specifications for the LOVE manager producers pods - resources: {} - # -- Node selection rules for the LOVE manager producers pods - nodeSelector: {} - # -- Toleration specifications for the LOVE manager producers pods - tolerations: [] - # -- Affinity rules for the LOVE manager producers pods - affinity: {} - # -- Configuration for the LOVE manager producers pods readiness probe - readinessProbe: {} + # -- Example producer configuration. Each producer should follow the + # same structure as frontend with the added name field. 
+ - name: example-producer + image: + # -- The LOVE manager producers image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager producers image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: rubinobs.atlassian.net + # -- Set the Jira project ID + JIRA_PROJECT_ID: 10063 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- Have the LOVE producer managers not query commander + HEARTBEAT_QUERY_COMMANDER: false + # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager producers + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager producers + DB_NAME: love + # -- The database user needed for access from the LOVE manager producers + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager producers secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager producers process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager producers admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager producers user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager producers cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager producers authlist_user password secret key name + AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager producers LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. 
+ # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- The LOVE manager jira API token secret key name + JIRA_API_TOKEN: jira-api-token + # -- Set the default number of LOVE manager producers pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager producers pods + resources: {} + # -- Node selection rules for the LOVE manager producers pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager producers pods + tolerations: [] + # -- Affinity rules for the LOVE manager producers pods + affinity: {} + # -- Configuration for the LOVE manager producers pods readiness probe + readinessProbe: {} + # -- Configuration for the producers ports. + # this is a single configuration for all the producers. + producers_ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 redis: image: # -- The redis image to use diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md index 7857e17d30..5420c2e03f 100644 --- a/applications/love/charts/love-producer/README.md +++ b/applications/love/charts/love-producer/README.md @@ -8,7 +8,7 @@ Helm chart for the LOVE producers. |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | | annotations | object | `{}` | This allows for the specification of pod annotations. 
| -| env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| env | object | `{}` | This section holds a set of key, value pairs for environmental variables | | envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | | image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | | image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index 77209f1579..5221670b21 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -34,6 +34,8 @@ spec: env: - name: LOVE_CSC_PRODUCER value: {{ $producer.csc | quote }} + - name: WEBSOCKET_HOST + value: {{ $producer.WEBSOCKET_HOST | quote }} - name: LSST_KAFKA_SECURITY_PASSWORD valueFrom: secretKeyRef: diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml index ca39d63d95..49d6de9594 100644 --- a/applications/love/charts/love-producer/values.yaml +++ b/applications/love/charts/love-producer/values.yaml @@ -8,8 +8,7 @@ image: # -- The pull policy on the LOVE producer image pullPolicy: IfNotPresent # -- This section holds a set of key, value pairs for environmental variables -env: - WEBSOCKET_HOST: love-nginx/manager/ws/subscription +env: {} # -- This section holds a set of key, value pairs for secrets envSecrets: PROCESS_CONNECTION_PASS: process-connection-pass diff --git a/applications/love/values-base.yaml b/applications/love/values-base.yaml index fecc6d4326..090346b111 100644 --- a/applications/love/values-base.yaml +++ b/applications/love/values-base.yaml @@ -23,6 +23,7 @@ love-manager: frontend: image: repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 pullPolicy: Always env: SERVER_URL: base-lsp.lsst.codes @@ -59,19 +60,174 @@ love-manager: initialDelaySeconds: 20 periodSeconds: 10 producers: + - name: general image: repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 pullPolicy: Always env: + LOVE_SITE: base SERVER_URL: base-lsp.lsst.codes OLE_API_HOSTNAME: base-lsp.lsst.codes AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 10 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 
120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: queue + image: + repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 + pullPolicy: Always + env: LOVE_SITE: base + SERVER_URL: base-lsp.lsst.codes + OLE_API_HOSTNAME: base-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 3 autoscaling: - enabled: true + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: m1m3 + image: + repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 + pullPolicy: Always + env: + LOVE_SITE: base + SERVER_URL: base-lsp.lsst.codes + OLE_API_HOSTNAME: base-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 1 + autoscaling: + enabled: false minReplicas: 2 maxReplicas: 25 targetCPUUtilizationPercentage: 50 @@ -156,7 +312,23 @@ love-nginx: proxy_redirect off; } location /love/manager/producers { - proxy_pass http://love-manager-producers-service:8000; + proxy_pass http://love-manager-producer-general-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/m1m3 { + proxy_pass http://love-manager-producer-m1m3-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + 
proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/queue { + proxy_pass http://love-manager-producer-queue-service:8000; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; @@ -200,8 +372,6 @@ love-producer: image: repository: ts-dockerhub.lsst.org/love-producer pullPolicy: Always - env: - WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription resources: requests: cpu: 10m @@ -212,90 +382,133 @@ love-producer: producers: - name: ataos csc: ATAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atcamera csc: ATCamera:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atdome csc: ATDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atdometrajectory csc: ATDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atheaderservice csc: ATHeaderService:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: athexapod csc: ATHexapod:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atmcs csc: ATMCS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atocps csc: OCPS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atoods csc: ATOODS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atpneumatics csc: ATPneumatics:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atptg csc: ATPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atscheduler csc: Scheduler:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atscriptqueue csc: ScriptQueue:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: atspectrograph csc: ATSpectrograph:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless201 csc: ESS:201 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless202 csc: ESS:202 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless203 csc: ESS:203 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless204 csc: ESS:204 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless205 csc: ESS:205 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: calibhilless301 csc: ESS:301 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: camerahexapod csc: MTHexapod:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dimm1 csc: DIMM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dimm2 csc: DIMM:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dsm1 csc: DSM:1 --log-level 10 + WEBSOCKET_HOST: 
love-nginx-service/love/manager/producers/ws/subscription - name: dsm2 csc: DSM:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: epm1 csc: EPM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: gcheaderservice1 csc: GCHeaderService:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: genericcamera1 csc: GenericCamera:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: lasertracker1 csc: LaserTracker:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: love csc: LOVE:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: m2ess106 csc: ESS:106 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: m2hexapod csc: MTHexapod:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaircompressor1 csc: MTAirCompressor:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaircompressor2 csc: MTAirCompressor:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaos csc: MTAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdome csc: MTDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess101 csc: ESS:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess102 csc: ESS:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess103 csc: ESS:103 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess107 csc: ESS:107 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess108 csc: ESS:108 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdometrajectory csc: MTDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtm1m3 csc: MTM1M3:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/m1m3/ws/subscription resources: requests: cpu: 10m @@ -305,27 +518,40 @@ love-producer: memory: 600Mi - name: mtm2 csc: MTM2:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtmount csc: MTMount:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtptg csc: MTPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtrotator csc: MTRotator:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtscheduler csc: Scheduler:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtscriptqueue csc: ScriptQueue:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: ocsscheduler csc: Scheduler:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: ocsscriptqueue csc: ScriptQueue:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: tmaess001 csc: 
ESS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: tmaess104 csc: ESS:104 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: tmaess105 csc: ESS:105 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: watcher csc: Watcher:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: weatherforecast csc: WeatherForecast:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription From c57bdefeb07ae126a4c6b767e61d41ddf150787a Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Wed, 4 Sep 2024 11:24:44 -0700 Subject: [PATCH 031/193] Use new survey name BLOCK-306 for LATISS prompt processing "BLOCK-306" is the new "AUXTEL_PHOTO_IMAGING". The survey name change happened on 2024-09-04. --- .../values-usdfprod-prompt-processing.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 5115052b25..da86def651 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -20,10 +20,7 @@ prompt-proto-service: pipelines: # BLOCK-295 is the daily calibration sequence as of May 27, 2024 main: >- - (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, + (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] (survey="BLOCK-T17")=[] @@ -34,8 +31,7 @@ prompt-proto-service: (survey="BLOCK-295")=[] (survey="")=[] preprocessing: >- - (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] - (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] + (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] (survey="BLOCK-T17")=[] (survey="cwfs")=[] (survey="cwfs-focus-sweep")=[] From 3b6cbe81f03d9241da8f4b6c81d45037ec057a3d Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 4 Sep 2024 10:54:49 -0400 Subject: [PATCH 032/193] Deploy Times Square 0.12.0 https://github.com/lsst-sqre/times-square/pull/80 https://github.com/lsst-sqre/times-square/releases/tag/0.12.0 --- applications/times-square/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index fc984d18bb..daa934699b 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.11.0" +appVersion: "0.12.0" dependencies: - name: redis From 513321109c514e24b1a5273d4395bcbc4d8a8184 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 6 Sep 2024 10:05:50 -0700 Subject: [PATCH 033/193] Summit: Add LSSTCam butler directories to nublado. 
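Once this is deployed, the new mounts can be sanity-checked from a freshly spawned lab pod; this is only a sketch and assumes the nfs3.cp.lsst.org exports are reachable from the summit cluster:

```
# Hypothetical post-deploy check from inside a user lab pod: the new LSSTCam
# paths added below should exist and be backed by NFS rather than empty dirs.
ls -d /repo/LSSTCam /readonly/lsstdata/lsstcam /data/lsstdata/base/lsstcam
df -hT /repo/LSSTCam
```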
--- applications/nublado/values-summit.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 289eaea4e6..0ccb69cac4 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -60,6 +60,11 @@ controller: type: "nfs" serverPath: "/repo/LSSTComCam" server: "comcam-archiver.cp.lsst.org" + - name: "lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/repo/LSSTCam" + server: "nfs3.cp.lsst.org" - name: "obs-env" source: type: "nfs" @@ -80,6 +85,11 @@ controller: type: "nfs" serverPath: "/auxtel/lsstdata" server: "nfs-auxtel.cp.lsst.org" + - name: "lsstdata-lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/lsstdata" + server: "nfs3.cp.lsst.org" - name: "lsstdata-base-comcam" source: type: "nfs" @@ -90,6 +100,11 @@ controller: type: "nfs" serverPath: "/auxtel/lsstdata/base/auxtel" server: "nfs-auxtel.cp.lsst.org" + - name: "lsstdata-base-lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/lsstdata/base/lsstcam" + server: "nfs3.cp.lsst.org" volumeMounts: - containerPath: "/home" volumeName: "home" @@ -101,6 +116,8 @@ controller: volumeName: "latiss" - containerPath: "/repo/LSSTComCam" volumeName: "lsstcomcam" + - containerPath: "/repo/LSSTCam" + volumeName: "lsstcam" - containerPath: "/net/obs-env" volumeName: "obs-env" - containerPath: "/readonly/lsstdata/other" @@ -109,10 +126,14 @@ controller: volumeName: "lsstdata-comcam" - containerPath: "/readonly/lsstdata/auxtel" volumeName: "lsstdata-auxtel" + - containerPath: "/readonly/lsstdata/lsstcam" + volumeName: "lsstdata-lsstcam" - containerPath: "/data/lsstdata/base/comcam" volumeName: "lsstdata-base-comcam" - containerPath: "/data/lsstdata/base/auxtel" volumeName: "lsstdata-base-auxtel" + - containerPath: "/data/lsstdata/base/lsstcam" + volumeName: "lsstdata-base-lsstcam" hub: internalDatabase: false From 5563beaa4973bf057af92ced6abd42254c0588a6 Mon Sep 17 00:00:00 2001 From: Erin Howard Date: Fri, 6 Sep 2024 11:16:28 -0700 Subject: [PATCH 034/193] Update LATISS Prompt Processing to 4.4.0. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index da86def651..d6ac56dc6c 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: 4.3.0 + tag: 4.4.0 instrument: pipelines: From ba1f9bcde694f7c9bc94f9fcd7c0a0eedce8edeb Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 6 Sep 2024 12:54:44 -0700 Subject: [PATCH 035/193] BTS: Update to Kafka Cycle 2. 
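Sketch of a quick consistency check (not part of the change itself): both files touched below should now reference the Cycle 2 tags.

```
# Hypothetical check from the repository root: the spawner's recommended tag
# and the control-system image tag should both point at the k0002 cycle.
grep -n "k0002" applications/nublado/values-base.yaml environments/values-base.yaml
```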
--- applications/nublado/values-base.yaml | 2 +- environments/values-base.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 269181582f..6fb6f5ad9c 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -10,7 +10,7 @@ controller: numWeeklies: 3 numDailies: 2 cycle: null - recommendedTag: "recommended_k0001" + recommendedTag: "recommended_k0002" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" diff --git a/environments/values-base.yaml b/environments/values-base.yaml index b5d2a8c5ac..e0a262f932 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -34,6 +34,6 @@ applications: uws: true controlSystem: - imageTag: "k0001" + imageTag: "k0002" siteTag: "base" s3EndpointUrl: "https://s3.ls.lsst.org" From 3bfe48eb409fd59eecca880776f6f6b3ce944dc8 Mon Sep 17 00:00:00 2001 From: Tiago Ribeiro Date: Fri, 6 Sep 2024 20:56:52 -0700 Subject: [PATCH 036/193] Add new IMAGE_SERVER_URL environment variable for all the ScriptQueue deployments at BTS. --- applications/obssys/values-base.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/obssys/values-base.yaml b/applications/obssys/values-base.yaml index 399aab63ef..c221197c1b 100644 --- a/applications/obssys/values-base.yaml +++ b/applications/obssys/values-base.yaml @@ -7,6 +7,7 @@ atqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 2 --state enabled USER_USERNAME: user + IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: &bS-cP /home/saluser/.lsst dbUser: &bS-dbU oods @@ -84,6 +85,7 @@ mtqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 1 --state enabled USER_USERNAME: user + IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: *bS-cP dbUser: *bS-dbU @@ -161,6 +163,7 @@ ocsqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 3 --state enabled USER_USERNAME: user + IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: *bS-cP dbUser: *bS-dbU From c1200cf81ccc0123b74d8fb780dfd3771d9f524b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 9 Sep 2024 08:12:00 -0700 Subject: [PATCH 037/193] Summit: Fix nublado LSSTCam mount. --- applications/nublado/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 0ccb69cac4..3fddbdcecc 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -103,7 +103,7 @@ controller: - name: "lsstdata-base-lsstcam" source: type: "nfs" - serverPath: "/lsstcam/lsstdata/base/lsstcam" + serverPath: "/lsstcam/lsstdata/base/maintel" server: "nfs3.cp.lsst.org" volumeMounts: - containerPath: "/home" @@ -132,7 +132,7 @@ controller: volumeName: "lsstdata-base-comcam" - containerPath: "/data/lsstdata/base/auxtel" volumeName: "lsstdata-base-auxtel" - - containerPath: "/data/lsstdata/base/lsstcam" + - containerPath: "/data/lsstdata/base/maintel" volumeName: "lsstdata-base-lsstcam" hub: From 2ccfbfa6d4aacbd15ef324d0027a0c290fa28e10 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Mon, 12 Aug 2024 16:06:57 -0700 Subject: [PATCH 038/193] Enable prompt processing for BLOCK-T17 BLOCK-T17 is the LATISS daytime checkout. 
Let the second exposure in the daytime checkout trigger ISR processing as a system test and potentially expose problems if any, before the telescope goes on sky. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index d6ac56dc6c..a379a6d328 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -23,7 +23,7 @@ prompt-proto-service: (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - (survey="BLOCK-T17")=[] + (survey="BLOCK-T17")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr-cal.yaml] (survey="cwfs")=[] (survey="cwfs-focus-sweep")=[] (survey="spec-survey")=[] From d194f563376351ab54f6855ae3d0a79b9d50ad4c Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 6 Sep 2024 19:34:26 -0700 Subject: [PATCH 039/193] TTS: Update nublado to Cycle 39. --- applications/nublado/values-tucson-teststand.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index abe987c409..bafd20a7a2 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -8,8 +8,8 @@ controller: numReleases: 0 numWeeklies: 3 numDailies: 2 - cycle: 38 - recommendedTag: "recommended_c0038" + cycle: 39 + recommendedTag: "recommended_c0039" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" From fc22c27ae27a80126af441d3031e553c9f2e768f Mon Sep 17 00:00:00 2001 From: Colin Slater Date: Thu, 15 Aug 2024 14:57:22 -0700 Subject: [PATCH 040/193] Update USDF TAP authentication to qserv. 
--- applications/tap/secrets-usdfdev.yaml | 4 ++++ applications/tap/secrets-usdfint.yaml | 4 ++++ applications/tap/secrets-usdfprod.yaml | 4 ++++ applications/tap/values-usdfdev.yaml | 3 ++- applications/tap/values-usdfint.yaml | 3 ++- applications/tap/values-usdfprod.yaml | 3 ++- 6 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 applications/tap/secrets-usdfdev.yaml create mode 100644 applications/tap/secrets-usdfint.yaml create mode 100644 applications/tap/secrets-usdfprod.yaml diff --git a/applications/tap/secrets-usdfdev.yaml b/applications/tap/secrets-usdfdev.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfdev.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/secrets-usdfint.yaml b/applications/tap/secrets-usdfint.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfint.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/secrets-usdfprod.yaml b/applications/tap/secrets-usdfprod.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfprod.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml index e82393ad97..5cd01dbb7e 100644 --- a/applications/tap/values-usdfdev.yaml +++ b/applications/tap/values-usdfdev.yaml @@ -6,7 +6,8 @@ cadc-tap: config: qserv: host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/tap/values-usdfint.yaml b/applications/tap/values-usdfint.yaml index b8f8ab9404..06b5e08204 100644 --- a/applications/tap/values-usdfint.yaml +++ b/applications/tap/values-usdfint.yaml @@ -6,7 +6,8 @@ cadc-tap: config: qserv: host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml index b8f8ab9404..06b5e08204 100644 --- a/applications/tap/values-usdfprod.yaml +++ b/applications/tap/values-usdfprod.yaml @@ -6,7 +6,8 @@ cadc-tap: config: qserv: host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" From 598a5d24d0c46c5895ac77ac51ef88baea2f48a7 Mon Sep 17 00:00:00 2001 From: Colin Slater Date: Thu, 15 Aug 2024 15:19:35 -0700 Subject: [PATCH 041/193] Point USDF TAP int/dev to USDF qserv int. Use FQDNs. 
--- applications/tap/values-usdfdev.yaml | 2 +- applications/tap/values-usdfint.yaml | 2 +- applications/tap/values-usdfprod.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml index 5cd01dbb7e..69109d427f 100644 --- a/applications/tap/values-usdfdev.yaml +++ b/applications/tap/values-usdfdev.yaml @@ -5,7 +5,7 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" + host: "sdfqserv001.sdf.slac.stanford.edu:4090" jdbcParams: "?enabledTLSProtocols=TLSv1.3" passwordEnabled: true diff --git a/applications/tap/values-usdfint.yaml b/applications/tap/values-usdfint.yaml index 06b5e08204..ca53594d8f 100644 --- a/applications/tap/values-usdfint.yaml +++ b/applications/tap/values-usdfint.yaml @@ -5,7 +5,7 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" + host: "sdfqserv001.sdf.slac.stanford.edu:4090" jdbcParams: "?enabledTLSProtocols=TLSv1.3" passwordEnabled: true diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml index 06b5e08204..9021a9e3fa 100644 --- a/applications/tap/values-usdfprod.yaml +++ b/applications/tap/values-usdfprod.yaml @@ -5,7 +5,7 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" + host: "sdfqserv001.sdf.slac.stanford.edu:4040" jdbcParams: "?enabledTLSProtocols=TLSv1.3" passwordEnabled: true From d3143f4f08d938071773d3614f9d148e446de9d2 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:15:23 -0700 Subject: [PATCH 042/193] vbecker argocd --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfint.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index c5343fe22b..a58a82f77e 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -58,6 +58,7 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/argocd/values-usdfint.yaml b/applications/argocd/values-usdfint.yaml index 5eaeafb2bd..12ba88dd96 100644 --- a/applications/argocd/values-usdfint.yaml +++ b/applications/argocd/values-usdfint.yaml @@ -57,6 +57,7 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 59a611f653..9c5fdf1734 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -53,6 +53,7 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer scopes: "[email]" server: From f1ff416adffb12345f1fe078e25582e964a03a85 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 6 Sep 2024 14:00:31 -0700 Subject: [PATCH 043/193] Enable collection of memory statistics --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml 
b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 8e5e0aa29d..fa0e7e4cf1 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -58,5 +58,6 @@ data: [[inputs.internal]] collect_memstats = false + collect_memstats = true {{- end }} {{- end }} From 9288ad4de455df038e469c1edb29ae05cb0371d2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 6 Sep 2024 14:01:00 -0700 Subject: [PATCH 044/193] Add an extra tag to identify the Telegraf instance --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index fa0e7e4cf1..ad6fe31dbd 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -57,7 +57,8 @@ data: consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} [[inputs.internal]] - collect_memstats = false collect_memstats = true + tags = { instance = "{{ $key }}" } + {{- end }} {{- end }} From 872d85b0cb9c18ecf3d601a02f1591f7ce001b97 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 6 Sep 2024 14:20:28 -0700 Subject: [PATCH 045/193] Route Telegraf internal metrics to its own database - Now that we are tagging the Telegraf internal metrics send them to a separate database. This allows for setting a different retention period for the telegraf database. --- .../telegraf-kafka-consumer/templates/configmap.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index ad6fe31dbd..c2419057fc 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -30,6 +30,15 @@ data: username = "${INFLUXDB_USER}" password = "${INFLUXDB_PASSWORD}" + [[outputs.influxdb]] + namepass = ["telegraf_*"] + urls = [ + {{ $.Values.influxdb.url | quote }} + ] + database = "telegraf" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + [[inputs.kafka_consumer]] brokers = [ "sasquatch-kafka-brokers.sasquatch:9092" @@ -57,6 +66,7 @@ data: consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} [[inputs.internal]] + name_prefix = "telegraf_" collect_memstats = true tags = { instance = "{{ $key }}" } From 3b1c8f55bfdb4f08d461038be8de19d0be0ba932 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Wed, 11 Sep 2024 09:39:47 -0700 Subject: [PATCH 046/193] Upgrade Butler server Upgrade to a new version of Butler server with support for a `query_collection_info` endpoint that is needed by the new Butler Collections API that will be part of the 2024_37 pipelines stack. 
--- applications/butler/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index 20d3063518..9d3b40a094 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Server for Butler data abstraction service sources: - https://github.com/lsst/daf_butler -appVersion: server-2.0.0 +appVersion: server-2.1.0 From 9224f18a8514129bb14943e7a57bffff7a8637bd Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Sep 2024 14:40:48 -0700 Subject: [PATCH 047/193] Update dependencies Switch to universal dependencies and remove the explicit dependency on greenlet. Move the runtime dependencies into pyproject.toml so that normal Python package installation will pick them up, but keep freezing dependencies as well. --- .pre-commit-config.yaml | 2 +- Makefile | 16 +- pyproject.toml | 12 + requirements/dev.in | 5 - requirements/dev.txt | 566 ++++++++++++++++++++++++---------------- requirements/main.in | 22 -- requirements/main.txt | 362 ++++++++++++------------- requirements/tox.txt | 75 +++--- 8 files changed, 581 insertions(+), 479 deletions(-) delete mode 100644 requirements/main.in diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3c7869cff9..99ad10ee7c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.3 + rev: v0.6.4 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/Makefile b/Makefile index 7916bf066c..48c10041bb 100644 --- a/Makefile +++ b/Makefile @@ -41,20 +41,20 @@ update-deps: pip install --upgrade pip uv uv pip install --upgrade pre-commit pre-commit autoupdate - uv pip compile --upgrade --generate-hashes \ - --output-file requirements/main.txt requirements/main.in - uv pip compile --upgrade --generate-hashes \ + uv pip compile --upgrade --universal --generate-hashes \ + --output-file requirements/main.txt pyproject.toml + uv pip compile --upgrade --universal --generate-hashes \ --output-file requirements/dev.txt requirements/dev.in - uv pip compile --upgrade --generate-hashes \ + uv pip compile --upgrade --universal --generate-hashes \ --output-file requirements/tox.txt requirements/tox.in # Useful for testing against a Git version of Safir. 
.PHONY: update-deps-no-hashes update-deps-no-hashes: pip install --upgrade uv - uv pip compile --upgrade \ - --output-file requirements/main.txt requirements/main.in - uv pip compile --upgrade \ + uv pip compile --upgrade --universal \ + --output-file requirements/main.txt pyproject.toml + uv pip compile --upgrade --universal \ --output-file requirements/dev.txt requirements/dev.in - uv pip compile --upgrade \ + uv pip compile --upgrade --universal \ --output-file requirements/tox.txt requirements/tox.in diff --git a/pyproject.toml b/pyproject.toml index 3b5c56bdb4..2911638a09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,18 @@ classifiers = [ "Operating System :: POSIX", ] requires-python = ">=3.11" +dependencies = [ + "bcrypt", + "click", + "cryptography", + "GitPython", + "hvac", + "jinja2", + "onepasswordconnectsdk", + "pydantic>2", + "PyYAML", + "safir>5", +] [project.scripts] phalanx = "phalanx.cli:main" diff --git a/requirements/dev.in b/requirements/dev.in index de327ec14d..00f15c1027 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -21,8 +21,3 @@ documenteer[guide]>1 sphinx-click sphinx-diagrams sphinx-jinja - -# Greenlet is a SQLAlchemy dependency on x86_64 but not on macOS, so we need -# to explicitly include it. Otherwise, if dependencies are rebuilt on macOS, -# dependency installation will fail on all other platforms. -greenlet diff --git a/requirements/dev.txt b/requirements/dev.txt index 8658c4de3d..515080cba5 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,5 +1,5 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/dev.txt requirements/dev.in +# uv pip compile --universal --generate-hashes --output-file requirements/dev.txt requirements/dev.in alabaster==1.0.0 \ --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b @@ -10,7 +10,7 @@ annotated-types==0.7.0 \ # via # -c requirements/main.txt # pydantic -appnope==0.1.4 \ +appnope==0.1.4 ; platform_system == 'Darwin' \ --hash=sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee \ --hash=sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c # via ipykernel @@ -43,6 +43,77 @@ certifi==2024.8.30 \ # -c requirements/main.txt # requests # sphinx-prompt +cffi==1.17.1 ; implementation_name == 'pypy' \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + 
--hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + 
--hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b + # via + # -c requirements/main.txt + # pyzmq charset-normalizer==3.3.2 \ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ @@ -145,6 +216,15 @@ click==8.1.7 \ # documenteer # jupyter-cache # sphinx-click +colorama==0.4.6 ; sys_platform == 'win32' or platform_system == 'Windows' \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via + # -c requirements/main.txt + # click + # ipython + # pytest + # sphinx comm==0.2.2 \ --hash=sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e \ --hash=sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3 @@ -298,66 +378,74 @@ graphviz==0.20.3 \ --hash=sha256:09d6bc81e6a9fa392e7ba52135a9d49f1ed62526f96499325930e87ca1b5925d \ --hash=sha256:81f848f2904515d8cd359cc611faba817598d2feaac4027b266aa3eda7b3dde5 # via diagrams -greenlet==3.0.3 \ - --hash=sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67 \ - --hash=sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6 \ - --hash=sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257 \ - --hash=sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4 \ - --hash=sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676 \ - --hash=sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61 \ - --hash=sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc \ - --hash=sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca \ - --hash=sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7 \ - --hash=sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728 \ - --hash=sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305 \ - --hash=sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6 \ - --hash=sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379 \ - --hash=sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414 \ - --hash=sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04 \ - --hash=sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a \ - --hash=sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf \ - --hash=sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491 \ - 
--hash=sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559 \ - --hash=sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e \ - --hash=sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274 \ - --hash=sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb \ - --hash=sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b \ - --hash=sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9 \ - --hash=sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b \ - --hash=sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be \ - --hash=sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506 \ - --hash=sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405 \ - --hash=sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113 \ - --hash=sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f \ - --hash=sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5 \ - --hash=sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230 \ - --hash=sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d \ - --hash=sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f \ - --hash=sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a \ - --hash=sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e \ - --hash=sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61 \ - --hash=sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6 \ - --hash=sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d \ - --hash=sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71 \ - --hash=sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22 \ - --hash=sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2 \ - --hash=sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3 \ - --hash=sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067 \ - --hash=sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc \ - --hash=sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881 \ - --hash=sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3 \ - --hash=sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e \ - --hash=sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac \ - --hash=sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53 \ - --hash=sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0 \ - --hash=sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b \ - --hash=sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83 \ - --hash=sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41 \ - --hash=sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c \ - --hash=sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf \ - --hash=sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da \ - --hash=sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33 - # via -r requirements/dev.in +greenlet==3.1.0 ; (python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 
'aarch64') or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64') \ + --hash=sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9 \ + --hash=sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17 \ + --hash=sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc \ + --hash=sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637 \ + --hash=sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2 \ + --hash=sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3 \ + --hash=sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6 \ + --hash=sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b \ + --hash=sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf \ + --hash=sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27 \ + --hash=sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1 \ + --hash=sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc \ + --hash=sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a \ + --hash=sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b \ + --hash=sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d \ + --hash=sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28 \ + --hash=sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303 \ + --hash=sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99 \ + --hash=sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f \ + --hash=sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7 \ + --hash=sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6 \ + --hash=sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a \ + --hash=sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc \ + --hash=sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0 \ + --hash=sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8 \ + --hash=sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a \ + --hash=sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca \ + --hash=sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b \ + --hash=sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989 \ + --hash=sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19 \ + --hash=sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6 \ + --hash=sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484 \ + --hash=sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd \ + --hash=sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25 \ + --hash=sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b \ + --hash=sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910 \ + --hash=sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0 \ + --hash=sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5 \ + --hash=sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345 \ + 
--hash=sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6 \ + --hash=sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00 \ + --hash=sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df \ + --hash=sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811 \ + --hash=sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca \ + --hash=sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8 \ + --hash=sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33 \ + --hash=sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97 \ + --hash=sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0 \ + --hash=sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b \ + --hash=sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682 \ + --hash=sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39 \ + --hash=sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64 \ + --hash=sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f \ + --hash=sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665 \ + --hash=sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f \ + --hash=sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc \ + --hash=sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d \ + --hash=sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a \ + --hash=sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0 \ + --hash=sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09 \ + --hash=sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b \ + --hash=sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491 \ + --hash=sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7 \ + --hash=sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954 \ + --hash=sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501 \ + --hash=sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54 + # via sqlalchemy idna==3.8 \ --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 @@ -369,9 +457,9 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -importlib-metadata==8.4.0 \ - --hash=sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1 \ - --hash=sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5 +importlib-metadata==8.5.0 \ + --hash=sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b \ + --hash=sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7 # via # jupyter-cache # myst-nb @@ -516,9 +604,9 @@ matplotlib-inline==0.1.7 \ # via # ipykernel # ipython -mdit-py-plugins==0.4.1 \ - --hash=sha256:1020dfe4e6bfc2c79fb49ae4e3f5b297f5ccd20f010187acc52af2921e27dc6a \ - --hash=sha256:834b8ac23d1cd60cec703646ffd22ae97b7955a6d596eb1d304be1e251ae499c +mdit-py-plugins==0.4.2 \ + --hash=sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636 \ + --hash=sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5 # via myst-parser mdurl==0.1.2 \ 
--hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ @@ -597,13 +685,13 @@ parso==0.8.4 \ --hash=sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18 \ --hash=sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d # via jedi -pexpect==4.9.0 \ +pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523 \ --hash=sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f # via ipython -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.2 \ + --hash=sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c \ + --hash=sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617 # via jupyter-core pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ @@ -632,7 +720,7 @@ psutil==6.0.0 \ --hash=sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14 \ --hash=sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0 # via ipykernel -ptyprocess==0.7.0 \ +ptyprocess==0.7.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35 \ --hash=sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220 # via pexpect @@ -650,110 +738,116 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.8.2 \ - --hash=sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a \ - --hash=sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8 +pycparser==2.22 ; implementation_name == 'pypy' \ + --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ + --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc + # via + # -c requirements/main.txt + # cffi +pydantic==2.9.1 \ + --hash=sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2 \ + --hash=sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612 # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.20.1 \ - --hash=sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d \ - --hash=sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f \ - --hash=sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686 \ - --hash=sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482 \ - --hash=sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006 \ - --hash=sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83 \ - --hash=sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6 \ - --hash=sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88 \ - --hash=sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86 \ - --hash=sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a \ - --hash=sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6 \ - --hash=sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a \ - 
--hash=sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6 \ - --hash=sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6 \ - --hash=sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43 \ - --hash=sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c \ - --hash=sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4 \ - --hash=sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e \ - --hash=sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203 \ - --hash=sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd \ - --hash=sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1 \ - --hash=sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24 \ - --hash=sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc \ - --hash=sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc \ - --hash=sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3 \ - --hash=sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598 \ - --hash=sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98 \ - --hash=sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331 \ - --hash=sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2 \ - --hash=sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a \ - --hash=sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6 \ - --hash=sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688 \ - --hash=sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91 \ - --hash=sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa \ - --hash=sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b \ - --hash=sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0 \ - --hash=sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840 \ - --hash=sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c \ - --hash=sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd \ - --hash=sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3 \ - --hash=sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 \ - --hash=sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1 \ - --hash=sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953 \ - --hash=sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250 \ - --hash=sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a \ - --hash=sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2 \ - --hash=sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20 \ - --hash=sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434 \ - --hash=sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab \ - --hash=sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703 \ - --hash=sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a \ - --hash=sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2 \ - --hash=sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac \ - --hash=sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611 \ - 
--hash=sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121 \ - --hash=sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e \ - --hash=sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b \ - --hash=sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09 \ - --hash=sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906 \ - --hash=sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9 \ - --hash=sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7 \ - --hash=sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b \ - --hash=sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987 \ - --hash=sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c \ - --hash=sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b \ - --hash=sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e \ - --hash=sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237 \ - --hash=sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1 \ - --hash=sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19 \ - --hash=sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b \ - --hash=sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad \ - --hash=sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0 \ - --hash=sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94 \ - --hash=sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312 \ - --hash=sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f \ - --hash=sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669 \ - --hash=sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1 \ - --hash=sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe \ - --hash=sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99 \ - --hash=sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a \ - --hash=sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a \ - --hash=sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52 \ - --hash=sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c \ - --hash=sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad \ - --hash=sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1 \ - --hash=sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a \ - --hash=sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f \ - --hash=sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a \ - --hash=sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27 +pydantic-core==2.23.3 \ + --hash=sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801 \ + --hash=sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec \ + --hash=sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295 \ + --hash=sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba \ + --hash=sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e \ + --hash=sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e \ + --hash=sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4 \ + 
--hash=sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211 \ + --hash=sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea \ + --hash=sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c \ + --hash=sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835 \ + --hash=sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d \ + --hash=sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c \ + --hash=sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c \ + --hash=sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61 \ + --hash=sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83 \ + --hash=sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb \ + --hash=sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1 \ + --hash=sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5 \ + --hash=sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690 \ + --hash=sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b \ + --hash=sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7 \ + --hash=sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70 \ + --hash=sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a \ + --hash=sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8 \ + --hash=sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd \ + --hash=sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee \ + --hash=sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1 \ + --hash=sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab \ + --hash=sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958 \ + --hash=sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5 \ + --hash=sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b \ + --hash=sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961 \ + --hash=sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c \ + --hash=sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25 \ + --hash=sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4 \ + --hash=sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4 \ + --hash=sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f \ + --hash=sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326 \ + --hash=sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab \ + --hash=sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8 \ + --hash=sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b \ + --hash=sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6 \ + --hash=sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8 \ + --hash=sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01 \ + --hash=sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc \ + --hash=sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d \ + --hash=sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e \ + --hash=sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b \ + 
--hash=sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855 \ + --hash=sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700 \ + --hash=sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a \ + --hash=sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa \ + --hash=sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541 \ + --hash=sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791 \ + --hash=sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162 \ + --hash=sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611 \ + --hash=sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef \ + --hash=sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe \ + --hash=sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5 \ + --hash=sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba \ + --hash=sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28 \ + --hash=sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa \ + --hash=sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27 \ + --hash=sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4 \ + --hash=sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b \ + --hash=sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2 \ + --hash=sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c \ + --hash=sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8 \ + --hash=sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb \ + --hash=sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c \ + --hash=sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e \ + --hash=sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305 \ + --hash=sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8 \ + --hash=sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4 \ + --hash=sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433 \ + --hash=sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45 \ + --hash=sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16 \ + --hash=sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed \ + --hash=sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0 \ + --hash=sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d \ + --hash=sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710 \ + --hash=sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48 \ + --hash=sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423 \ + --hash=sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf \ + --hash=sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9 \ + --hash=sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63 \ + --hash=sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5 \ + --hash=sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb # via # -c requirements/main.txt # pydantic -pydantic-settings==2.4.0 \ - --hash=sha256:bb6849dc067f1687574c12a639e231f3a6feeed0a12d710c1382045c5db1c315 \ - --hash=sha256:ed81c3a0f46392b4d7c0a565c05884e6e54b3456e6f0fe4d8814981172dc9a88 
+pydantic-settings==2.5.2 \ + --hash=sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907 \ + --hash=sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0 # via autodoc-pydantic pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ @@ -770,9 +864,9 @@ pygments==2.18.0 \ pylatexenc==2.10 \ --hash=sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3 # via documenteer -pytest==8.3.2 \ - --hash=sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5 \ - --hash=sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce +pytest==8.3.3 \ + --hash=sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181 \ + --hash=sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2 # via # -r requirements/dev.in # pytest-cov @@ -795,6 +889,22 @@ python-dotenv==1.0.1 \ --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \ --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a # via pydantic-settings +pywin32==306 ; platform_python_implementation != 'PyPy' and sys_platform == 'win32' \ + --hash=sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d \ + --hash=sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65 \ + --hash=sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e \ + --hash=sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b \ + --hash=sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4 \ + --hash=sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040 \ + --hash=sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a \ + --hash=sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36 \ + --hash=sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8 \ + --hash=sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e \ + --hash=sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802 \ + --hash=sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a \ + --hash=sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407 \ + --hash=sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0 + # via jupyter-core pyyaml==6.0.2 \ --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ @@ -1091,9 +1201,9 @@ rpds-py==0.20.0 \ # via # jsonschema # referencing -setuptools==74.1.1 \ - --hash=sha256:2353af060c06388be1cecbf5953dcdb1f38362f87a2356c480b6b4d5fcfc8847 \ - --hash=sha256:fc91b5f89e392ef5b77fe143b17e32f65d3024744fba66dc3afe07201684d766 +setuptools==74.1.2 \ + --hash=sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308 \ + --hash=sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6 # via documenteer six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1140,9 +1250,9 @@ sphinx==8.0.2 \ # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.3.0 \ - --hash=sha256:3098e2c6d0ba99eacd013eb06861acc9b51c6e595be86ab05c08ee5506ac0c67 \ - --hash=sha256:535c78ed2d6a1bad393ba9f3dfa2602cf424e2631ee207263e07874c38fde084 +sphinx-autodoc-typehints==2.4.0 \ + --hash=sha256:8f8281654ddf5709875429b7120d367f4edee39a131e13d5806e4f779a81bf0f \ + 
--hash=sha256:c9774d47e7d304cf975e073df49ebf19763dca94ac0295e7013b522b26cb18de # via documenteer sphinx-automodapi==0.17.0 \ --hash=sha256:4d029cb79eef29413e94ab01bb0177ebd2d5ba86e9789b73575afe9c06ae1501 \ @@ -1225,56 +1335,56 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer -sqlalchemy==2.0.33 \ - --hash=sha256:06504d9625e3ef114b39803ebca6f379133acad58a87c33117ddc5df66079915 \ - --hash=sha256:06b30bbc43c6dd8b7cdc509cd2e58f4f1dce867565642e1d1a65e32459c89bd0 \ - --hash=sha256:0ea64443a86c3b5a0fd7c93363ad2f9465cb3af61f9920b7c75d1a7bebbeef8a \ - --hash=sha256:1109cc6dc5c9d1223c42186391e6a5509e6d4ab2c30fa629573c10184f742f2e \ - --hash=sha256:17d0c69f66392ad2db1609373a74d1f834b2e632f3f52d446747b8ec220aea53 \ - --hash=sha256:1d81e3aeab456fe24c3f0dcfd4f952a3a5ee45e9c14fc66d34c1d7a60cf7b698 \ - --hash=sha256:2415824ec658891ac38d13a2f36b4ceb2033f034dee1c226f83917589a65f072 \ - --hash=sha256:28c0800c851955f5bd11c0b904638c1343002650d0c071c6fbf0d157cc78627d \ - --hash=sha256:2b1e98507ec2aa200af980d592e936e9dac1c1ec50acc94330ae4b13c55d6fea \ - --hash=sha256:30a3f55be76364b64c83788728faaba782ab282a24909e1994404c2146d39982 \ - --hash=sha256:31e56020832be602201fbf8189f379569cf5c3604cdc4ce79f10dbbfcbf8a0eb \ - --hash=sha256:32a4f38d2efca066ec793451ef6852cb0d9086dc3d5479d88a5a25529d1d1861 \ - --hash=sha256:3ad94634338d8c576b1d47a96c798be186650aa5282072053ce2d12c6f309f82 \ - --hash=sha256:3c64d58e83a68e228b1ae6ebac8721241e9d8cc5e0c0dd11ed5d89155477b243 \ - --hash=sha256:454e9b4355f0051063daebc4060140251c19f33fc5d02151c347431860fd104b \ - --hash=sha256:459099ab8dd43a5edbb99f58ba4730baec457df9c06ebc71434c6b4b78cc8cf9 \ - --hash=sha256:49541a43828e273325c520fbacf786615bd974dad63ff60b8ea1e1216e914d1a \ - --hash=sha256:4f1c44c8d66101e6f627f330d8b5b3de5ad25eedb6df3ce39a2e6f92debbcf15 \ - --hash=sha256:523ae689c023cbf0fe1613101254824515193f85f806ba04611dee83302660b5 \ - --hash=sha256:570ec43e8c3c020abac4f0720baa5fe5187334e3f1e8e1777183c041962b61cc \ - --hash=sha256:60c54b677d4f0a0b2df3b79e89e84d601fb931c720176641742efd66b50601f9 \ - --hash=sha256:61e9a2d68a5a8ca6a84cbc79aa7f2e430ae854d3351d6e9ceb3edf6798797b63 \ - --hash=sha256:63b7d9890f7958dabd95cf98a3f48740fbe2bb0493523aef590e82164fa68194 \ - --hash=sha256:67eb8e0ffbebd3d82ec5079ca5f807a661c574b482785483717857c2acab833a \ - --hash=sha256:684aee5fd811091b2f48006fb3fe6c7f2de4a716ef8d294a2aab762099753133 \ - --hash=sha256:751eaafa907a66dd9a328a9d15c3dcfdcba3ef8dd8f7f4a9771cdacdec45d9bf \ - --hash=sha256:77eaf8fdf305266b806a91ae4633edbf86ad37e13bd92ac85e305e7f654c19a5 \ - --hash=sha256:7fd0a28bc24a75326f13735a58272247f65c9e8ee16205eacb2431d6ee94f44a \ - --hash=sha256:816c927dd51e4951d6e79870c945340057a5d8e63543419dee0d247bd67a88f8 \ - --hash=sha256:81759e77a4985abdbac068762a0eaf0f11860fe041ad6da170aae7615ea72531 \ - --hash=sha256:82c72da5be489c8d150deba70d5732398695418df5232bceb52ee323ddd9753b \ - --hash=sha256:8bef11d31a1c48f5943e577d1ef81085ec1550c37552bfc9bf8e5d184ce47142 \ - --hash=sha256:91c93333c2b37ff721dc83b37e28c29de4c502b5612f2d093468037b86aa2be0 \ - --hash=sha256:92249ac94279b8e5f0c0c8420e09b804d0a49d2269f52f549d4cb536c8382434 \ - --hash=sha256:93efa4b72f7cb70555b0f66ee5e113ae40073c57054a72887e50b05bfd97baa4 \ - --hash=sha256:9d035a672d5b3e4793a4a8865c3274a7bbbac7fac67a47b415023b5539105087 \ - --hash=sha256:9e5819822050e6e36e2aa41260d05074c026a1bbb9baa6869170b5ce64db7a4d \ - 
--hash=sha256:a3926e4ed4a3e956c8b2b0f1140493378c8cd17cad123b4fc1e0f6ecd3e05b19 \ - --hash=sha256:a3da2371628e28ef279f3f756f5e58858fad7820de08508138c9f5f9e4d8f4ac \ - --hash=sha256:ac252bafe8cbadfac7b1e8a74748ffd775e27325186d12b82600b652d9adcb86 \ - --hash=sha256:ae294808afde1b14a1a69aa86a69cadfe391848bbb233a5332a8065e4081cabc \ - --hash=sha256:c40e0213beaf410a151e4329e30c73687838c251c998ba1b312975dbbcb2d05d \ - --hash=sha256:c5d5a733c6af7f392435e673d1b136f6bdf2366033abb35eed680400dc730840 \ - --hash=sha256:c633e2d2f8a7b88c06e276bbe16cb7e62fed815fcbeb69cd9752cea166ecb8e8 \ - --hash=sha256:c9f4f92eee7d06531cc6a5b814e603a0c7639876aab03638dcc70c420a3974f6 \ - --hash=sha256:ca8788dc1baee100f09110f33a01d928cf9df4483d2bfb25a37be31a659d46bb \ - --hash=sha256:d004a623ad4aa8d2eb31b37e65b5e020c9f65a1852b8b9e6301f0e411aca5b9a \ - --hash=sha256:ee2b82b170591ccd19d463c9798a9caeea0cad967a8d2f3264de459f582696d5 \ - --hash=sha256:f7c82a7930126bb5ccfbb73fc1562d52942fbffb2fda2791fab49de249fc202a +sqlalchemy==2.0.34 \ + --hash=sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22 \ + --hash=sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3 \ + --hash=sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2 \ + --hash=sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd \ + --hash=sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d \ + --hash=sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c \ + --hash=sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278 \ + --hash=sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b \ + --hash=sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021 \ + --hash=sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba \ + --hash=sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8 \ + --hash=sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b \ + --hash=sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e \ + --hash=sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b \ + --hash=sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a \ + --hash=sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82 \ + --hash=sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e \ + --hash=sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4 \ + --hash=sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3 \ + --hash=sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b \ + --hash=sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721 \ + --hash=sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d \ + --hash=sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a \ + --hash=sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f \ + --hash=sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc \ + --hash=sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83 \ + --hash=sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287 \ + --hash=sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434 \ + --hash=sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02 \ + --hash=sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db \ + 
--hash=sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8 \ + --hash=sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74 \ + --hash=sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24 \ + --hash=sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7 \ + --hash=sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0 \ + --hash=sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5 \ + --hash=sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f \ + --hash=sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c \ + --hash=sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812 \ + --hash=sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768 \ + --hash=sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8 \ + --hash=sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b \ + --hash=sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84 \ + --hash=sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796 \ + --hash=sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b \ + --hash=sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2 \ + --hash=sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06 \ + --hash=sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec \ + --hash=sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580 # via jupyter-cache stack-data==0.6.3 \ --hash=sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9 \ diff --git a/requirements/main.in b/requirements/main.in deleted file mode 100644 index 8847952ab1..0000000000 --- a/requirements/main.in +++ /dev/null @@ -1,22 +0,0 @@ -# Editable runtime dependencies (equivalent to project.dependencies). -# Add direct runtime dependencies here, as well as implicit dependencies -# with constrained versions. These should be sufficient to run the phalanx -# command-line tool. -# -# After editing, update requirements/main.txt by running: -# make update-deps - -bcrypt -click -cryptography -GitPython -hvac -jinja2 -onepasswordconnectsdk -pydantic>2 -PyYAML -safir>5 - -# Uncomment this, change the branch, comment out safir above, and run make -# update-deps-no-hashes to test against an unreleased version of Safir. 
-# safir @ git+https://github.com/lsst-sqre/safir@main diff --git a/requirements/main.txt b/requirements/main.txt index 67830cad11..2bbd0d4a85 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,5 +1,5 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/main.txt requirements/main.in +# uv pip compile --universal --generate-hashes --output-file requirements/main.txt pyproject.toml annotated-types==0.7.0 \ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 @@ -38,7 +38,7 @@ bcrypt==4.2.0 \ --hash=sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8 \ --hash=sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221 \ --hash=sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db - # via -r requirements/main.in + # via phalanx (pyproject.toml) certifi==2024.8.30 \ --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 @@ -46,74 +46,74 @@ certifi==2024.8.30 \ # httpcore # httpx # requests -cffi==1.17.0 \ - --hash=sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f \ - --hash=sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab \ - --hash=sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499 \ - --hash=sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058 \ - --hash=sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693 \ - --hash=sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb \ - --hash=sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377 \ - --hash=sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885 \ - --hash=sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2 \ - --hash=sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401 \ - --hash=sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4 \ - --hash=sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b \ - --hash=sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59 \ - --hash=sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f \ - --hash=sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c \ - --hash=sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555 \ - --hash=sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa \ - --hash=sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424 \ - --hash=sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb \ - --hash=sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2 \ - --hash=sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8 \ - --hash=sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e \ - --hash=sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9 \ - --hash=sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82 \ - --hash=sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828 \ - --hash=sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759 \ - --hash=sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc \ - 
--hash=sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118 \ - --hash=sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf \ - --hash=sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932 \ - --hash=sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a \ - --hash=sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29 \ - --hash=sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206 \ - --hash=sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2 \ - --hash=sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c \ - --hash=sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c \ - --hash=sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0 \ - --hash=sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a \ - --hash=sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195 \ - --hash=sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6 \ - --hash=sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9 \ - --hash=sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc \ - --hash=sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb \ - --hash=sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0 \ - --hash=sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7 \ - --hash=sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb \ - --hash=sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a \ - --hash=sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492 \ - --hash=sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720 \ - --hash=sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42 \ - --hash=sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7 \ - --hash=sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d \ - --hash=sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d \ - --hash=sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb \ - --hash=sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4 \ - --hash=sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2 \ - --hash=sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b \ - --hash=sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8 \ - --hash=sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e \ - --hash=sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204 \ - --hash=sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3 \ - --hash=sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150 \ - --hash=sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4 \ - --hash=sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76 \ - --hash=sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e \ - --hash=sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb \ - --hash=sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91 +cffi==1.17.1 ; platform_python_implementation != 'PyPy' \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + 
--hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + 
--hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b # via cryptography charset-normalizer==3.3.2 \ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ @@ -211,8 +211,12 @@ click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de # via - # -r requirements/main.in + # phalanx (pyproject.toml) # safir +colorama==0.4.6 ; platform_system == 'Windows' \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via click cryptography==43.0.1 \ --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ @@ -242,12 +246,12 @@ cryptography==43.0.1 \ --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 # via - # -r requirements/main.in + # phalanx (pyproject.toml) # pyjwt # safir -fastapi==0.112.2 \ - --hash=sha256:3d4729c038414d5193840706907a41839d839523da6ed0c2811f1168cac1798c \ - --hash=sha256:db84b470bd0e2b1075942231e90e3577e12a903c4dc8696f0d206a7904a7af1c +fastapi==0.114.1 \ + --hash=sha256:1d7bbbeabbaae0acb0c22f0ab0b040f642d3093ca3645f8c876b6f91391861d8 \ + --hash=sha256:5d4746f6e4b7dff0b4f6b6c6d5445645285f662fe75886e99af7ee2d6b58bb3e # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -260,7 +264,7 @@ gitdb==4.0.11 \ gitpython==3.1.43 \ 
--hash=sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c \ --hash=sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff - # via -r requirements/main.in + # via phalanx (pyproject.toml) h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 @@ -278,7 +282,7 @@ httpx==0.23.3 \ hvac==2.3.0 \ --hash=sha256:1b85e3320e8642dd82f234db63253cda169a817589e823713dc5fca83119b1e2 \ --hash=sha256:a3afc5710760b6ee9b3571769df87a0333da45da05a5f9f963e1d3925a84be7d - # via -r requirements/main.in + # via phalanx (pyproject.toml) idna==3.8 \ --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 @@ -289,7 +293,7 @@ idna==3.8 \ jinja2==3.1.4 \ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d - # via -r requirements/main.in + # via phalanx (pyproject.toml) markupsafe==2.1.5 \ --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ @@ -355,108 +359,108 @@ markupsafe==2.1.5 \ onepasswordconnectsdk==1.5.1 \ --hash=sha256:8924c614ffed98f29faada03dba940dc0bc47851b1f5f4ef7e312e43c10ec25b \ --hash=sha256:f8e033dbb5dcc5ff08fbdbbfe329655adce6ec44cfe54652474d7e31175de48e - # via -r requirements/main.in -pycparser==2.22 \ + # via phalanx (pyproject.toml) +pycparser==2.22 ; platform_python_implementation != 'PyPy' \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc # via cffi -pydantic==2.8.2 \ - --hash=sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a \ - --hash=sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8 +pydantic==2.9.1 \ + --hash=sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2 \ + --hash=sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612 # via - # -r requirements/main.in + # phalanx (pyproject.toml) # fastapi # safir -pydantic-core==2.20.1 \ - --hash=sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d \ - --hash=sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f \ - --hash=sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686 \ - --hash=sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482 \ - --hash=sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006 \ - --hash=sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83 \ - --hash=sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6 \ - --hash=sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88 \ - --hash=sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86 \ - --hash=sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a \ - --hash=sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6 \ - --hash=sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a \ - --hash=sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6 \ - --hash=sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6 \ - 
--hash=sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43 \ - --hash=sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c \ - --hash=sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4 \ - --hash=sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e \ - --hash=sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203 \ - --hash=sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd \ - --hash=sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1 \ - --hash=sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24 \ - --hash=sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc \ - --hash=sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc \ - --hash=sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3 \ - --hash=sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598 \ - --hash=sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98 \ - --hash=sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331 \ - --hash=sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2 \ - --hash=sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a \ - --hash=sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6 \ - --hash=sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688 \ - --hash=sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91 \ - --hash=sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa \ - --hash=sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b \ - --hash=sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0 \ - --hash=sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840 \ - --hash=sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c \ - --hash=sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd \ - --hash=sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3 \ - --hash=sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 \ - --hash=sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1 \ - --hash=sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953 \ - --hash=sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250 \ - --hash=sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a \ - --hash=sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2 \ - --hash=sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20 \ - --hash=sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434 \ - --hash=sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab \ - --hash=sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703 \ - --hash=sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a \ - --hash=sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2 \ - --hash=sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac \ - --hash=sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611 \ - --hash=sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121 \ - --hash=sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e \ - 
--hash=sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b \ - --hash=sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09 \ - --hash=sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906 \ - --hash=sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9 \ - --hash=sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7 \ - --hash=sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b \ - --hash=sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987 \ - --hash=sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c \ - --hash=sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b \ - --hash=sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e \ - --hash=sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237 \ - --hash=sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1 \ - --hash=sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19 \ - --hash=sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b \ - --hash=sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad \ - --hash=sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0 \ - --hash=sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94 \ - --hash=sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312 \ - --hash=sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f \ - --hash=sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669 \ - --hash=sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1 \ - --hash=sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe \ - --hash=sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99 \ - --hash=sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a \ - --hash=sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a \ - --hash=sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52 \ - --hash=sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c \ - --hash=sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad \ - --hash=sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1 \ - --hash=sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a \ - --hash=sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f \ - --hash=sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a \ - --hash=sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27 +pydantic-core==2.23.3 \ + --hash=sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801 \ + --hash=sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec \ + --hash=sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295 \ + --hash=sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba \ + --hash=sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e \ + --hash=sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e \ + --hash=sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4 \ + --hash=sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211 \ + --hash=sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea \ + 
--hash=sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c \ + --hash=sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835 \ + --hash=sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d \ + --hash=sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c \ + --hash=sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c \ + --hash=sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61 \ + --hash=sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83 \ + --hash=sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb \ + --hash=sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1 \ + --hash=sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5 \ + --hash=sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690 \ + --hash=sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b \ + --hash=sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7 \ + --hash=sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70 \ + --hash=sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a \ + --hash=sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8 \ + --hash=sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd \ + --hash=sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee \ + --hash=sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1 \ + --hash=sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab \ + --hash=sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958 \ + --hash=sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5 \ + --hash=sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b \ + --hash=sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961 \ + --hash=sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c \ + --hash=sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25 \ + --hash=sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4 \ + --hash=sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4 \ + --hash=sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f \ + --hash=sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326 \ + --hash=sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab \ + --hash=sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8 \ + --hash=sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b \ + --hash=sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6 \ + --hash=sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8 \ + --hash=sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01 \ + --hash=sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc \ + --hash=sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d \ + --hash=sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e \ + --hash=sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b \ + --hash=sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855 \ + --hash=sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700 \ + 
--hash=sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a \ + --hash=sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa \ + --hash=sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541 \ + --hash=sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791 \ + --hash=sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162 \ + --hash=sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611 \ + --hash=sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef \ + --hash=sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe \ + --hash=sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5 \ + --hash=sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba \ + --hash=sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28 \ + --hash=sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa \ + --hash=sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27 \ + --hash=sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4 \ + --hash=sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b \ + --hash=sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2 \ + --hash=sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c \ + --hash=sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8 \ + --hash=sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb \ + --hash=sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c \ + --hash=sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e \ + --hash=sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305 \ + --hash=sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8 \ + --hash=sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4 \ + --hash=sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433 \ + --hash=sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45 \ + --hash=sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16 \ + --hash=sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed \ + --hash=sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0 \ + --hash=sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d \ + --hash=sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710 \ + --hash=sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48 \ + --hash=sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423 \ + --hash=sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf \ + --hash=sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9 \ + --hash=sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63 \ + --hash=sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5 \ + --hash=sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb # via # pydantic # safir @@ -522,7 +526,7 @@ pyyaml==6.0.2 \ --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 - # via -r requirements/main.in + # via phalanx (pyproject.toml) requests==2.32.3 \ 
--hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 @@ -534,7 +538,7 @@ rfc3986==1.5.0 \ safir==6.3.0 \ --hash=sha256:2fcd64bf37dd42eacedd6378341b2487cd06dbaf1f28403301b8d80f60a4fb56 \ --hash=sha256:6ad7dad520d87d853628849ef95a348c55dbd0180ad3f15c1cf2f7f8fe32f915 - # via -r requirements/main.in + # via phalanx (pyproject.toml) safir-logging==6.3.0 \ --hash=sha256:491dfe85de89a3f2daa29c491a22a0551f0961444490418d91ec50c040ae16eb \ --hash=sha256:e14754ab0bba6cfa248c3fc4cb5ca28410d97ff3965e831eab6581ed37485e79 @@ -554,9 +558,9 @@ sniffio==1.3.1 \ # anyio # httpcore # httpx -starlette==0.38.4 \ - --hash=sha256:526f53a77f0e43b85f583438aee1a940fd84f8fd610353e8b0c1a77ad8a87e76 \ - --hash=sha256:53a7439060304a208fea17ed407e998f46da5e5d9b1addfea3040094512a6379 +starlette==0.38.5 \ + --hash=sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077 \ + --hash=sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206 # via # fastapi # safir diff --git a/requirements/tox.txt b/requirements/tox.txt index 5acb5ec80e..5a2eb7afd2 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -1,5 +1,5 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/tox.txt requirements/tox.in +# uv pip compile --universal --generate-hashes --output-file requirements/tox.txt requirements/tox.in cachetools==5.5.0 \ --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a @@ -11,14 +11,17 @@ chardet==5.2.0 \ colorama==0.4.6 \ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 - # via tox + # via + # -c requirements/dev.txt + # -c requirements/main.txt + # tox distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.16.0 \ + --hash=sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec \ + --hash=sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609 # via # tox # virtualenv @@ -30,9 +33,9 @@ packaging==24.1 \ # pyproject-api # tox # tox-uv -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.2 \ + --hash=sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c \ + --hash=sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617 # via # -c requirements/dev.txt # tox @@ -47,37 +50,37 @@ pyproject-api==1.7.1 \ --hash=sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb \ --hash=sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827 # via tox -tox==4.18.0 \ - --hash=sha256:0a457400cf70615dc0627eb70d293e80cd95d8ce174bb40ac011011f0c03a249 \ - --hash=sha256:5dfa1cab9f146becd6e351333a82f9e0ade374451630ba65ee54584624c27b58 +tox==4.18.1 \ + --hash=sha256:35d472032ee1f73fe20c3e0e73d7073a4e85075c86ff02c576f9fc7c6a15a578 \ + 
--hash=sha256:3c0c96bc3a568a5c7e66387a4cfcf8c875b52e09f4d47c9f7a277ec82f1a0b11 # via # -r requirements/tox.in # tox-uv -tox-uv==1.11.2 \ - --hash=sha256:7f8f1737b3277e1cddcb5b89fcc5931d04923562c940ae60f29e140908566df2 \ - --hash=sha256:a7aded5c3fb69f055b523357988c1055bb573e91bfd7ecfb9b5233ebcab5d10b +tox-uv==1.11.3 \ + --hash=sha256:316f559ae5525edec12791d9e1f393e405ded5b7e7d50fbaee4726676951f49a \ + --hash=sha256:d434787406ff2854600c1ceaa555519080026208cf7f65bb5d4b2d7c9c4776de # via -r requirements/tox.in -uv==0.4.4 \ - --hash=sha256:051589ab42bfdb2997ea61a56a78a2bab0b6ae7d014f96a578dcc5f9d8766757 \ - --hash=sha256:0c9ada2fbfe3ca29c50914acd714fe35100ab56fdb83510d1aadd00d55191d1b \ - --hash=sha256:0d0af47198dc4ca635540b72c933219c6c967885788fd1f651112f168fcade0a \ - --hash=sha256:0d51db6bf89b7b0a4aae229f7efee00fc52a1d7391605f3b789996f9c7986653 \ - --hash=sha256:14f06ed0e0f163c9ec8b26f4fc2df14530080e405d7348ad0c59f9c296c55918 \ - --hash=sha256:3e9139f171329b6fa40a064f9e7923848d44e60bc31da138758695ec34d8200d \ - --hash=sha256:433c69a6e7f35c865172d69e51bf78521a9d87eac6f8772af04667f5d25cc9a9 \ - --hash=sha256:718bbdf0675eab8d15f52041725b60743a9496fde3dc493d34913aa4a15f0a81 \ - --hash=sha256:8ba084d6d5baf92a3cfe41a20fd912dea4e2ea3eca8401f1892394c5c2b79c92 \ - --hash=sha256:918d4da22f9ff4403dc72dfb4c58c994400a64797a3a17f00f5c0d3717d1cb8c \ - --hash=sha256:9ba6abad0a531181bcb90b9af818e2490211f2d4b3eb83eb2a27df1f07f299fb \ - --hash=sha256:c1b7db1db176e46184c974ed30687671ec5d67cfcce34c7ed4a63141ecb6c70e \ - --hash=sha256:d2e2c9db83efd81b0b8dcaa45533b461b058d5aec49a6ed6cc98832e56e45856 \ - --hash=sha256:d66242bba1bbec847b77fcdc7d3191eab733189213a5d2717dbda1ff04e24b46 \ - --hash=sha256:da3a77ad858be5239ae33509ddfeaf097d7bda77fc0b2a42994cbec32cef4769 \ - --hash=sha256:dc881ea11dcb443940bbac5d7601cd7c74f80e7086c2e310e86ebf10d1c8816b \ - --hash=sha256:dd94e5be00a0a06ab5cbba7014720440a12bae73150d8146bc3535f3a22ff069 \ - --hash=sha256:f866f9a44982ef8041a982c3197a17e18d4a8ac7717b4462477ea0ca6a088a52 +uv==0.4.9 \ + --hash=sha256:0340d2c7bf9afe0098e3301c1885de10e317232cfa346f0ac16374cee284a4cb \ + --hash=sha256:060af185481ef46ab97008cad330f3cd7a7aa1ce3d219b67d27c5a2a551ac2ea \ + --hash=sha256:1a8acc7abb2174bd3c8f5fc98345f2bb602f31b7558e37f3d23bef99ddd58dec \ + --hash=sha256:34bce9f4892130b01a7605d27bbeb71395e9b031d793123c250b79187ee307ca \ + --hash=sha256:45bf0cead2436b1977f71669e945db19990ca70a7765111fb951545815467bb6 \ + --hash=sha256:52101bc8652b4284b78fac52ed7878f3bae414bc4076c377735962666b309dde \ + --hash=sha256:5422680436f4cebef945bb2e562e01c02a4fa0a95f85d1b8010f2ee868a0b8c1 \ + --hash=sha256:55cf2522262ef663114bda5d80375ddc7f7af0d054df89426372a0d494380875 \ + --hash=sha256:566d4d7a475aacd21dbb4aba053cd4f4f52d65acdef2c83c59bcdff08756701e \ + --hash=sha256:5b66a52cb60a2882a882bc5f13afa6daf3172a54fe9fb998529d19418d5aed18 \ + --hash=sha256:630a6fe215829f734278e618c1633c2bb88ee03dc6a92ae9890fabd98ee810a9 \ + --hash=sha256:69529b6bf5de6ec8fbe8e022f5bcbaef778e76136fc37fae6ec7a8b18b3f9024 \ + --hash=sha256:71e87038fcc9f61b2d6f66c4a92354c6d0abe4baae21bb90241693f161ddeaa1 \ + --hash=sha256:8869637ea6231f66fe643be22f9334874db3496844b3d8bfd8efd4227ded3d44 \ + --hash=sha256:9c9b70f016f28cc05633b564d8690cfdb7ebac4d2210d9158819947841e00347 \ + --hash=sha256:b54a9022e9e1fdbf3ae15ef340a0d1d1847dd739df5023896aa8d97d88af1efe \ + --hash=sha256:bf834f7f360a192372d879eda86f6a1dd94195faf68154dcf7c90247098d2bb2 \ + --hash=sha256:f50cbdfbc8399e1211c580e47f42650a184541ee398af95ad29bf9a2e977baba # via tox-uv 
-virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 +virtualenv==20.26.4 \ + --hash=sha256:48f2695d9809277003f30776d155615ffc11328e6a0a8c1f0ec80188d7874a55 \ + --hash=sha256:c17f4e0f3e6036e9f26700446f85c76ab11df65ff6d8a9cbfad9f71aabfcf23c # via tox From d9e60d1312a554a2b10766eac971e5f99f861d80 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Sep 2024 14:48:25 -0700 Subject: [PATCH 048/193] Update secrets.yaml schema The new version of Pydantic generates a slightly different schema for secrets.yaml. --- docs/extras/schemas/secrets.json | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json index 415c78af8a..638d86b804 100644 --- a/docs/extras/schemas/secrets.json +++ b/docs/extras/schemas/secrets.json @@ -60,11 +60,7 @@ "title": "Condition" }, "onepassword": { - "allOf": [ - { - "$ref": "#/$defs/SecretOnepasswordConfig" - } - ], + "$ref": "#/$defs/SecretOnepasswordConfig", "description": "Configuration for how the secret is stored in 1Password", "title": "1Password configuration" }, From 72ad6b0700b9389fee94ef2a92082bd4a24b35c2 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 10 Sep 2024 17:58:22 -0300 Subject: [PATCH 049/193] exposurelog: add butler secrets --- applications/exposurelog/secrets-usdfdev.yaml | 12 ++++++++++++ applications/exposurelog/templates/deployment.yaml | 6 ++++++ 2 files changed, 18 insertions(+) create mode 100644 applications/exposurelog/secrets-usdfdev.yaml diff --git a/applications/exposurelog/secrets-usdfdev.yaml b/applications/exposurelog/secrets-usdfdev.yaml new file mode 100644 index 0000000000..317e9c5aab --- /dev/null +++ b/applications/exposurelog/secrets-usdfdev.yaml @@ -0,0 +1,12 @@ +"aws-credentials.ini": + description: >- + S3 Butler credentials in AWS format. + copy: + application: nublado + key: "aws-credentials.ini" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 775d9a7635..8f7e1f0ca0 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -86,6 +86,8 @@ spec: - name: volume2 mountPath: /volume_2 {{- end }} + - name: aws-secrets + mountPath: /var/secrets/aws - name: tmp mountPath: /tmp volumes: @@ -110,6 +112,10 @@ spec: readOnly: true server: {{ .Values.config.nfs_server_3 }} {{- end }} + - name: aws-secrets + secret: + defaultMode: 420 + secretName: exposurelog - name: tmp emptyDir: {} {{- with .Values.nodeSelector }} From f39451012fc55066316743458f6e09cc9ee12369 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Wed, 11 Sep 2024 16:06:41 -0300 Subject: [PATCH 050/193] exposurelog: add init container for secrets permission fix --- .../exposurelog/templates/deployment.yaml | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 8f7e1f0ca0..312033545d 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -86,10 +86,25 @@ spec: - name: volume2 mountPath: /volume_2 {{- end }} - - name: aws-secrets - mountPath: /var/secrets/aws + - name: user-secrets + mountPath: /var/secrets/butler - name: tmp mountPath: /tmp + initContainers: + - name: secret-perm-fixer + image: busybox + command: + - "/bin/sh" + - "-c" + - | + cp /secrets/* /etc/secrets && \ + chown 1000:1000 /etc/secrets/* && \ + chmod 0400 /etc/secrets/* + volumeMounts: + - name: butler-secrets + mountPath: /secrets + - name: user-secrets + mountPath: /etc/secrets volumes: {{- if .Values.config.nfs_path_1 }} - name: volume1 @@ -112,10 +127,12 @@ spec: readOnly: true server: {{ .Values.config.nfs_server_3 }} {{- end }} - - name: aws-secrets + - name: butler-secrets secret: defaultMode: 420 secretName: exposurelog + - name: user-secrets + emptyDir: {} - name: tmp emptyDir: {} {{- with .Values.nodeSelector }} From 137807b3b9e04ad487d99682dc30886798285097 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Wed, 11 Sep 2024 16:14:22 -0300 Subject: [PATCH 051/193] exposurelog: add environment variable to point to `aws-credentials.ini` file --- applications/exposurelog/templates/deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 312033545d..714ccb5136 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -77,6 +77,8 @@ spec: value: {{ .Values.db.database | quote }} - name: SITE_ID value: {{ .Values.config.site_id | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: /var/secrets/butler/aws-credentials.ini volumeMounts: {{- if .Values.config.nfs_path_1 }} - name: volume1 From 7d3a98f0297e3dc3c8c9b253fab2a92c0b449587 Mon Sep 17 00:00:00 2001 From: Stelios Voutsinas Date: Thu, 12 Sep 2024 11:50:26 -0700 Subject: [PATCH 052/193] Upgrade version of ssotap to 1.18.6 --- charts/cadc-tap/README.md | 2 +- charts/cadc-tap/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 3e13b7fc98..f0ab35a1aa 100644 --- a/charts/cadc-tap/README.md +++ 
b/charts/cadc-tap/README.md @@ -31,7 +31,7 @@ IVOA TAP service | config.pg.host | string | None, must be set if backend is `pg` | Host to connect to | | config.pg.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP image | | config.pg.image.repository | string | `"ghcr.io/lsst-sqre/tap-postgres-service"` | TAP image to use | -| config.pg.image.tag | string | `"1.18.5"` | Tag of TAP image to use | +| config.pg.image.tag | string | `"1.18.6"` | Tag of TAP image to use | | config.pg.username | string | None, must be set if backend is `pg` | Username to connect with | | config.qserv.host | string | `"mock-db:3306"` (the mock QServ) | QServ hostname:port to connect to | | config.qserv.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP image | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 317a89e879..d61fdf37af 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -71,7 +71,7 @@ config: pullPolicy: "IfNotPresent" # -- Tag of TAP image to use - tag: "1.18.5" + tag: "1.18.6" qserv: # -- QServ hostname:port to connect to From 35fd94bb00e14cc8f09d46cc08db933d0949505e Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Fri, 6 Sep 2024 13:46:36 -0400 Subject: [PATCH 053/193] Deploy Times Square 0.13.0 See https://github.com/lsst-sqre/times-square/pull/81 https://github.com/lsst-sqre/times-square/releases/tag/0.13.0 --- applications/times-square/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index daa934699b..5a48fbb4f9 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.12.0" +appVersion: "0.13.0" dependencies: - name: redis From ce778b0ea064abc47f7117bee0e9f8e181547e7d Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 9 Sep 2024 16:13:51 -0400 Subject: [PATCH 054/193] Deploy Noteburst 0.13.0 https://github.com/lsst-sqre/noteburst/releases/tag/0.13.0 --- applications/noteburst/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index dbb9d3de95..bcac46aef7 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.12.1" +appVersion: "0.13.0" description: Noteburst is a notebook execution service for the Rubin Science Platform. type: application home: https://noteburst.lsst.io/ From b333b95d9645093e963b7914fcf97cee36db34ed Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 11 Sep 2024 18:24:14 -0400 Subject: [PATCH 055/193] Add timeout configurations for Times Square This adds TS_CHECK_RUN_TIMEOUT and TS_DEFAULT_EXECUTION_TIMEOUT environment variables. --- applications/times-square/README.md | 2 ++ applications/times-square/templates/configmap.yaml | 2 ++ applications/times-square/values.yaml | 6 ++++++ 3 files changed, 10 insertions(+) diff --git a/applications/times-square/README.md b/applications/times-square/README.md index c690816d26..e1c49b6ed0 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -23,8 +23,10 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.instanceConnectionName | string | `""` | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | +| config.defaultExecutionTimeout | string | `"60"` | Default execution timeout for notebooks in seconds | | config.enableGitHubApp | string | `"False"` | Toggle to enable the GitHub App functionality | | config.githubAppId | string | `""` | GitHub application ID | +| config.githubCheckRunTimeout | string | `"600"` | Timeout for GitHub check runs in seconds | | config.githubOrgs | string | `"lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"` | GitHub organizations that can sync repos to Times Square (comma-separated). | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.name | string | `"times-square"` | Name of the service. | diff --git a/applications/times-square/templates/configmap.yaml b/applications/times-square/templates/configmap.yaml index 739914e85b..d11584634f 100644 --- a/applications/times-square/templates/configmap.yaml +++ b/applications/times-square/templates/configmap.yaml @@ -16,3 +16,5 @@ data: TS_ENABLE_GITHUB_APP: {{ .Values.config.enableGitHubApp | quote }} TS_GITHUB_APP_ID: {{ .Values.config.githubAppId | quote }} TS_GITHUB_ORGS: {{ .Values.config.githubOrgs | quote }} + TS_CHECK_RUN_TIMEOUT: {{ .Values.config.githubCheckRunTimeout | quote }} + TS_DEFAULT_EXECUTION_TIMEOUT: {{ .Values.config.defaultExecutionTimeout | quote }} diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index ac482b06df..595bb9677b 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -136,6 +136,12 @@ config: # -- GitHub organizations that can sync repos to Times Square (comma-separated). githubOrgs: "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst" + # -- Timeout for GitHub check runs in seconds + githubCheckRunTimeout: "600" # 10 minutes + + # -- Default execution timeout for notebooks in seconds + defaultExecutionTimeout: "60" # 1 minute + worker: # -- Enable liveness checks for the arq queue enableLivenessCheck: true From ada629f17fdd345701b3e3fba2d2dc791fa06c8d Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 12 Sep 2024 12:55:34 -0400 Subject: [PATCH 056/193] Increase default Times Square timeouts Make the defaults large enough to handle some of the heavier-weight Times Square notebooks like https://usdf-rsp-dev.slac.stanford.edu/times-square/github/lsst/schedview_notebooks/nightly/scheduler-nightsum that currently take 200 seconds to execute. On data-dev I've kept the shorter timeout for testing. --- applications/times-square/README.md | 4 ++-- applications/times-square/values-idfdev.yaml | 2 ++ applications/times-square/values.yaml | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/applications/times-square/README.md b/applications/times-square/README.md index e1c49b6ed0..7385571ed7 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -23,10 +23,10 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.instanceConnectionName | string | `""` | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | -| config.defaultExecutionTimeout | string | `"60"` | Default execution timeout for notebooks in seconds | +| config.defaultExecutionTimeout | string | `"300"` | Default execution timeout for notebooks in seconds | | config.enableGitHubApp | string | `"False"` | Toggle to enable the GitHub App functionality | | config.githubAppId | string | `""` | GitHub application ID | -| config.githubCheckRunTimeout | string | `"600"` | Timeout for GitHub check runs in seconds | +| config.githubCheckRunTimeout | string | `"900"` | Timeout for GitHub check runs in seconds | | config.githubOrgs | string | `"lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"` | GitHub organizations that can sync repos to Times Square (comma-separated). | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.name | string | `"times-square"` | Name of the service. | diff --git a/applications/times-square/values-idfdev.yaml b/applications/times-square/values-idfdev.yaml index de7c4d6e60..9adb89ef9b 100644 --- a/applications/times-square/values-idfdev.yaml +++ b/applications/times-square/values-idfdev.yaml @@ -7,6 +7,8 @@ config: databaseUrl: "postgresql://times-square@localhost/times-square" githubAppId: "196798" enableGitHubApp: "True" + githubCheckRunTimeout: "600" # 10 minutes + defaultExecutionTimeout: "60" # 1 minute cloudsql: enabled: true instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 595bb9677b..1a26a01356 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -137,10 +137,10 @@ config: githubOrgs: "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst" # -- Timeout for GitHub check runs in seconds - githubCheckRunTimeout: "600" # 10 minutes + githubCheckRunTimeout: "900" # 15 minutes # -- Default execution timeout for notebooks in seconds - defaultExecutionTimeout: "60" # 1 minute + defaultExecutionTimeout: "300" # 5 minutes worker: # -- Enable liveness checks for the arq queue From 3d2bd2520a881cfbde8bb9246f5ac98cf5c0e279 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 12 Sep 2024 12:58:50 -0400 Subject: [PATCH 057/193] Change noteburst maxConcurrentJobs to 1 This seems better because the JupyterLab notebook execution endpoint is synchronous, so we may need to also run the noteburst jobs synchronously. --- applications/noteburst/README.md | 2 +- applications/noteburst/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 5db1416c00..0f195c8d15 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -25,7 +25,7 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | config.worker.imageSelector | string | `"recommended"` | Nublado image stream to select: "recommended", "weekly" or "reference" | | config.worker.jobTimeout | int | `300` | The default notebook execution timeout, in seconds. 
| | config.worker.keepAlive | string | `"normal"` | Worker keep alive mode: "normal", "fast", "disabled" | -| config.worker.maxConcurrentJobs | int | `3` | Max number of concurrent notebook executions per worker | +| config.worker.maxConcurrentJobs | int | `1` | Max number of concurrent notebook executions per worker | | config.worker.tokenLifetime | string | `"2419200"` | Worker token lifetime, in seconds. | | config.worker.tokenScopes | string | `"exec:notebook,read:image,read:tap,read:alertdb"` | Nublado2 worker account's token scopes as a comma-separated list. | | config.worker.workerCount | int | `1` | Number of workers to run | diff --git a/applications/noteburst/values.yaml b/applications/noteburst/values.yaml index d136cbe1f9..0ec9a6f705 100644 --- a/applications/noteburst/values.yaml +++ b/applications/noteburst/values.yaml @@ -123,7 +123,7 @@ config: jobTimeout: 300 # -- Max number of concurrent notebook executions per worker - maxConcurrentJobs: 3 + maxConcurrentJobs: 1 # -- Worker token lifetime, in seconds. tokenLifetime: "2419200" From 8246fdbb0a2f8d124d48dd5da295dde17b43e11a Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 14:00:07 -0700 Subject: [PATCH 058/193] Enable Cruise Control --- applications/sasquatch/values-summit.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 5c82488f2a..e036c9e140 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -51,6 +51,8 @@ strimzi-kafka: nginx.ingress.kubernetes.io/rewrite-target: /$2 hostname: summit-lsp.lsst.codes path: /schema-registry(/|$)(.*) + cruiseControl: + enabled: true influxdb: persistence: From 7db8b42fbfe48892830e0780b9f3819c4d1d6a50 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 14:16:14 -0700 Subject: [PATCH 059/193] Create new KafkaNodePool resource for local storage --- applications/sasquatch/values-summit.yaml | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index e036c9e140..13390710a1 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -53,7 +53,24 @@ strimzi-kafka: path: /schema-registry(/|$)(.*) cruiseControl: enabled: true - + brokerStorage: + enabled: false + storageClassName: localdrive + size: 15Ti + migration: + enabled: true + rebalance: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - yagan17 + - yagan18 + - yagan19 influxdb: persistence: storageClass: rook-ceph-block From 7d49867c5804fb2fa98553557565fcddb4100ee6 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 3 Sep 2024 14:38:45 -0700 Subject: [PATCH 060/193] Add resources requests and limits configuration --- applications/sasquatch/values-summit.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 13390710a1..f58607ae3e 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -18,6 +18,13 @@ strimzi-kafka: - broker: 2 loadBalancerIP: "139.229.180.5" host: sasquatch-summit-kafka-2.lsst.codes + resources: + requests: + memory: 32Gi + cpu: 4 + limits: + memory: 32Gi + cpu: 4 kraft: enabled: true 
kafkaController: From c0e54c7840b728f86dde8a67641342122534f725 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 4 Sep 2024 09:14:02 -0700 Subject: [PATCH 061/193] Allocate IPs from the LHN pool - Use the metallb.universe.tf/address-pool: lhn annotation to allocate IPs from the LHN pool, that's required to replicate data to USDF - Pin IP addresses after assignment --- applications/sasquatch/values-summit.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index f58607ae3e..11ec819265 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -18,6 +18,21 @@ strimzi-kafka: - broker: 2 loadBalancerIP: "139.229.180.5" host: sasquatch-summit-kafka-2.lsst.codes + - broker: 6 + loadBalancerIP: "139.229.180.8" + host: sasquatch-summit-kafka-6.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn + - broker: 7 + loadBalancerIP: "139.229.180.9" + host: sasquatch-summit-kafka-7.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn + - broker: 8 + loadBalancerIP: "139.229.180.10" + host: sasquatch-summit-kafka-8.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn resources: requests: memory: 32Gi From 6f6e85df3dcf4cdde7cea1375343e9cb223615c5 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 4 Sep 2024 09:19:11 -0700 Subject: [PATCH 062/193] Rollback to Kafka version 3.7.1 temporarily --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 2e8e8fca70..243ea90458 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -365,7 +365,7 @@ Rubin Observatory's telemetry service | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| strimzi-kafka.kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | +| strimzi-kafka.kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | | strimzi-kafka.kafkaController.enabled | bool | `false` | Enable Kafka Controller | | strimzi-kafka.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | strimzi-kafka.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index fd425d5279..4e844c02a3 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -41,7 +41,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | +| kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | | kafkaController.enabled | bool | `false` | Enable Kafka Controller | | kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index fa0deaa57b..f43fd60e4c 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -11,7 +11,7 @@ cluster: kafka: # -- Version of Kafka to deploy - version: "3.8.0" + version: "3.7.1" # -- Number of Kafka broker replicas to run replicas: 3 From 428c65365b9b7dd0b522773fa94fda7fad9c5b5a Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 11 Sep 2024 09:00:31 -0700 Subject: [PATCH 063/193] Start Kafka cluster rebalancing --- applications/sasquatch/values-summit.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 11ec819265..ca003f3c8a 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -81,7 +81,11 @@ strimzi-kafka: size: 15Ti migration: enabled: true - rebalance: false + rebalance: true + brokers: + - 0 + - 1 + - 2 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 285ce6c87182c303cb57a4f247d756659084d6f2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 12 Sep 2024 17:01:22 -0700 Subject: [PATCH 064/193] Complete migration --- applications/sasquatch/values-summit.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index ca003f3c8a..7c4a45f4d6 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -76,12 +76,12 @@ strimzi-kafka: cruiseControl: enabled: true brokerStorage: - enabled: false + enabled: true storageClassName: localdrive size: 15Ti migration: - enabled: true - rebalance: true + enabled: false + rebalance: false brokers: - 0 - 1 From 7d3c94c40e9d76be6a28282af4af9dc4b3de7d72 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 12 Sep 2024 17:16:45 -0700 Subject: [PATCH 065/193] Remove old brokers - Remove reference to the old broker ids --- applications/sasquatch/values-summit.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 7c4a45f4d6..2c377045ff 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -9,15 +9,6 @@ strimzi-kafka: loadBalancerIP: "139.229.180.2" host: sasquatch-summit-kafka-bootstrap.lsst.codes brokers: - - broker: 0 - loadBalancerIP: "139.229.180.3" - host: sasquatch-summit-kafka-0.lsst.codes - - broker: 1 - loadBalancerIP: "139.229.180.4" - host: sasquatch-summit-kafka-1.lsst.codes - - broker: 2 
- loadBalancerIP: "139.229.180.5" - host: sasquatch-summit-kafka-2.lsst.codes - broker: 6 loadBalancerIP: "139.229.180.8" host: sasquatch-summit-kafka-6.lsst.codes @@ -82,10 +73,6 @@ strimzi-kafka: migration: enabled: false rebalance: false - brokers: - - 0 - - 1 - - 2 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From c352d3fe346cb30d77ce2bb0680892518112ce0e Mon Sep 17 00:00:00 2001 From: Erin Howard Date: Thu, 12 Sep 2024 17:16:56 -0700 Subject: [PATCH 066/193] Update LATISS Prompt Processing to 4.5.0. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index a379a6d328..77d8ba6207 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: 4.4.0 + tag: 4.5.0 instrument: pipelines: From 58fbf928f06d051eaa1d464d7b258c616fbb5939 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Fri, 13 Sep 2024 10:56:15 -0300 Subject: [PATCH 067/193] rubintv: update app version for summit and usdf production deployments --- applications/rubintv/values-summit.yaml | 2 +- applications/rubintv/values-usdfprod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/rubintv/values-summit.yaml b/applications/rubintv/values-summit.yaml index 62ef88610c..aa74c4dffe 100644 --- a/applications/rubintv/values-summit.yaml +++ b/applications/rubintv/values-summit.yaml @@ -20,7 +20,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.2.0 + tag: v2.3.0 pullPolicy: Always workers: diff --git a/applications/rubintv/values-usdfprod.yaml b/applications/rubintv/values-usdfprod.yaml index 2499da043c..7349f935f4 100644 --- a/applications/rubintv/values-usdfprod.yaml +++ b/applications/rubintv/values-usdfprod.yaml @@ -16,7 +16,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.2.0 + tag: v2.3.0 pullPolicy: Always workers: From 0f24f951a46fb8cc1ef66947a4ed86d528a816ae Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Fri, 13 Sep 2024 11:04:24 -0300 Subject: [PATCH 068/193] rubintv: add worker pods for summit production deployment --- applications/rubintv/values-summit.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/applications/rubintv/values-summit.yaml b/applications/rubintv/values-summit.yaml index aa74c4dffe..c1f2cb88ef 100644 --- a/applications/rubintv/values-summit.yaml +++ b/applications/rubintv/values-summit.yaml @@ -24,7 +24,7 @@ rubintv: pullPolicy: Always workers: - replicas: 0 + replicas: 1 image: repository: ts-dockerhub.lsst.org/rapid-analysis tag: c0037 @@ -32,7 +32,7 @@ rubintv: uid: 73006 gid: 73006 scriptsLocation: /repos/rubintv_analysis_service/scripts - script: rubintv_worker.py -a rubintv-dev -p 8080 -l summit + script: rubintv_worker.py -a rubintv -p 8080 -l summit env: - name: S3_ENDPOINT_URL value: *s3E @@ -43,9 +43,12 @@ rubintv: - name: DEPLOY_BRANCH value: *dbE resources: + requests: + cpu: 0.5 + memory: 1G limits: - cpu: 2.0 - memory: "8Gi" + cpu: 1.0 + memory: 2.5G global: tsVaultSecretsPath: "" From ba5ea835630f54087dee48e2d315b62027a2a8d1 Mon 
Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 13 Sep 2024 13:31:42 -0700 Subject: [PATCH 069/193] Summit: Update nublado to Cycle 39. --- applications/nublado/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 3fddbdcecc..6bede5f34d 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -8,8 +8,8 @@ controller: numReleases: 0 numWeeklies: 3 numDailies: 2 - cycle: 38 - recommendedTag: "recommended_c0038" + cycle: 39 + recommendedTag: "recommended_c0039" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" From 9810bf8c08db0684900662879183bcde7fefe931 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 14:11:50 -0700 Subject: [PATCH 070/193] Remove the interval setting - The kafka consumer input plugin is a service input plugin. Normal plugins gather metrics determined by the interval setting. Service plugins start a service and wait for metrics or events to occur. --- applications/sasquatch/README.md | 2 -- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 1 - .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 -- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 4 ---- 4 files changed, 9 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 243ea90458..8408c84e4e 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -416,7 +416,6 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | -| telegraf-kafka-consumer.kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | @@ -452,7 +451,6 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval.
Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | -| telegraf-kafka-consumer-oss.kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index d9c8dbcb70..a523f08be7 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -24,7 +24,6 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | -| kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | | kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. 
| diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index c2419057fc..25ee702d97 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -12,8 +12,6 @@ metadata: data: telegraf.conf: |+ [agent] - interval = {{ default "1s" $value.interval | quote }} - round_interval = true metric_batch_size = {{ default 1000 $value.metric_batch_size }} metric_buffer_limit = {{ default 10000 $value.metric_buffer_limit }} collection_jitter = {{ default "0s" $value.collection_jitter | quote }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 9b8e89ebb3..383d4097d7 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -60,10 +60,6 @@ kafkaConsumers: # increase the consumer throughput. replicaCount: 1 - # -- Data collection interval for the Kafka consumer. - # @default -- "1s" - interval: "1s" - # -- Sends metrics to the output in batches of at most metric_batch_size # metrics. # @default -- 1000 From 4c1e229787b4711495ba7989305e62bada680db2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 14:17:04 -0700 Subject: [PATCH 071/193] Tune configuration for high throughput - Increase metric_batch_size, metric_buffer_limit and max_undelivered_messages for high throughput --- applications/sasquatch/README.md | 10 ++++++---- .../charts/telegraf-kafka-consumer/README.md | 5 +++-- .../templates/configmap.yaml | 5 +++-- .../charts/telegraf-kafka-consumer/values.yaml | 14 ++++++++++---- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 8408c84e4e..821a6d07fa 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -417,8 +417,9 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | -| telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | -| telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| telegraf-kafka-consumer.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | +| telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. 
| +| telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer.kafkaConsumers.test.precision | string | "1us" | Data precision. | | telegraf-kafka-consumer.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | @@ -452,8 +453,9 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | -| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | -| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.precision | string | "1us" | Data precision. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index a523f08be7..83e69f6047 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -25,8 +25,9 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. 
Maximum flush_interval is flush_interval + flush_jitter | | kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | -| kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | -| kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | +| kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | kafkaConsumers.test.precision | string | "1us" | Data precision. | | kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 25ee702d97..6cc13266b5 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -12,8 +12,8 @@ metadata: data: telegraf.conf: |+ [agent] - metric_batch_size = {{ default 1000 $value.metric_batch_size }} - metric_buffer_limit = {{ default 10000 $value.metric_buffer_limit }} + metric_batch_size = {{ default 5000 $value.metric_batch_size }} + metric_buffer_limit = {{ default 100000 $value.metric_buffer_limit }} collection_jitter = {{ default "0s" $value.collection_jitter | quote }} flush_interval = {{ default "10s" $value.flush_interval | quote }} flush_jitter = {{ default "0s" $value.flush_jitter | quote }} @@ -62,6 +62,7 @@ data: precision = {{ default "1us" $value.precision | quote }} max_processing_time = {{ default "5s" $value.max_processing_time | quote }} consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} + max_undelivered_messages = {{ default 10000 $value.max_undelivered_messages }} [[inputs.internal]] name_prefix = "telegraf_" diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 383d4097d7..683c0fadc6 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -62,14 +62,14 @@ kafkaConsumers: # -- Sends metrics to the output in batches of at most metric_batch_size # metrics. 
- # @default -- 1000 - metric_batch_size: 1000 + # @default -- 5000 + metric_batch_size: 5000 # -- Caches metric_buffer_limit metrics for each output, and flushes this # buffer on a successful write. This should be a multiple of metric_batch_size # and could not be less than 2 times metric_batch_size. - # @default -- 10000 - metric_buffer_limit: 10000 + # @default -- 100000 + metric_buffer_limit: 100000 # -- Data collection jitter. This is used to jitter the collection by a # random amount. Each plugin will sleep for a random time within jitter @@ -171,6 +171,12 @@ kafkaConsumers: # @default -- "20MB" consumer_fetch_default: "20MB" + # -- Maximum number of undelivered messages. + # Should be a multiple of metric_batch_size, setting it too low may never + # flush the broker's messages. + # @default -- 10000 + max_undelivered_messages: 10000 + influxdb: # -- URL of the InfluxDB v1 instance to write to url: "http://sasquatch-influxdb.sasquatch:8086" From 6df4c81aa7c7f413d3772e4e43c98d85461beb90 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 14:18:10 -0700 Subject: [PATCH 072/193] Enable message compression - Use the LZ4 compression codec --- applications/sasquatch/README.md | 2 ++ .../sasquatch/charts/telegraf-kafka-consumer/README.md | 1 + .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 1 + .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 4 ++++ 4 files changed, 8 insertions(+) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 821a6d07fa..70fd2715a2 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -410,6 +410,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | | telegraf-kafka-consumer.kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| telegraf-kafka-consumer.kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | | telegraf-kafka-consumer.kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | | telegraf-kafka-consumer.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | @@ -446,6 +447,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer-oss.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | | telegraf-kafka-consumer-oss.kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 
0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | | telegraf-kafka-consumer-oss.kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 83e69f6047..24364be802 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -18,6 +18,7 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | | kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | | kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | | kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 6cc13266b5..5be588773d 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -63,6 +63,7 @@ data: max_processing_time = {{ default "5s" $value.max_processing_time | quote }} consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} max_undelivered_messages = {{ default 10000 $value.max_undelivered_messages }} + compression_codec = {{ default 3 $value.compression_codec }} [[inputs.internal]] name_prefix = "telegraf_" diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 683c0fadc6..5307740270 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -177,6 +177,10 @@ kafkaConsumers: # @default -- 10000 max_undelivered_messages: 10000 + # -- Compression codec. 
0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD + # @default -- 3 + compression_codec: 3 + influxdb: # -- URL of the InfluxDB v1 instance to write to url: "http://sasquatch-influxdb.sasquatch:8086" From d4ce565cb4f4238bdf6aac9553c9241069bc9cf8 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 14:20:53 -0700 Subject: [PATCH 073/193] Increase default flush interval MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - We noticed the error message [“outputs.influxdb”] did not complete within its flush interval in some instances of Telegraf, which means Telegraf is unable to write all the gathered data inside the defined interval. --- applications/sasquatch/README.md | 4 ++-- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 2 +- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 70fd2715a2..459c2f9459 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -415,7 +415,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | -| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | +| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "10s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer.kafkaConsumers.test.precision | string | "1us" | Data precision. | | telegraf-kafka-consumer.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | @@ -452,7 +452,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | -| telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval.
Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 24364be802..e361988887 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -23,7 +23,7 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | -| kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | +| kafkaConsumers.test.flush_interval | string | "10s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 5307740270..28b3081941 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -80,8 +80,8 @@ kafkaConsumers: # -- Data flushing interval for all outputs. # Don’t set this below interval. # Maximum flush_interval is flush_interval + flush_jitter - # @default -- "1s" - flush_interval: "1s" + # @default -- "10s" + flush_interval: "10s" # -- Jitter the flush interval by a random amount. 
This is primarily to # avoid large write spikes for users running a large number of telegraf From b41a6e63898884930d57ae7582a8f4f81eb582ff Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 14:25:42 -0700 Subject: [PATCH 074/193] Enable debug mode - Enable debug mode and make sure we consume the oldest offsets for testing these changes --- applications/sasquatch/values-summit.yaml | 36 ++++++++++++++--------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 2c377045ff..fd39ff3389 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -290,6 +290,7 @@ telegraf-kafka-consumer: timestamp_field: "timestamp" topicRegexps: | [ "lsst.backpack" ] + debug: true # CSC connectors maintel: enabled: true @@ -297,7 +298,7 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] - offset: "newest" + debug: true mtmount: enabled: true database: "efd" @@ -305,21 +306,21 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] - offset: "newest" + debug: true comcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] - offset: "newest" + debug: true eas: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] - offset: "newest" + debug: true m1m3: enabled: true database: "efd" @@ -327,94 +328,98 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] - offset: "newest" + debug: true m2: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] - offset: "newest" + debug: true obssys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] - offset: "newest" + debug: true ocps: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.OCPS" ] - offset: "newest" + debug: true pmd: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.PMD" ] - offset: "newest" + debug: true calsys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] - offset: "newest" + debug: true mtaircompressor: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] - offset: "newest" + debug: true genericcamera: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] - offset: "newest" + debug: true gis: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GIS" ] - offset: "newest" + debug: true lsstcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] - offset: "newest" + debug: 
true auxtel: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true latiss: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true test: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Test" ] + debug: true lasertracker: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.LaserTracker" ] + debug: true # CCS connectors (experimental) data is being written on separate databases for now atcamera: enabled: true @@ -425,6 +430,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] topicRegexps: | [ "lsst.ATCamera" ] + debug: true cccamera: enabled: true database: "lsst.CCCamera" @@ -434,6 +440,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] topicRegexps: | [ "lsst.CCCamera" ] + debug: true mtcamera: enabled: true database: "lsst.MTCamera" @@ -443,6 +450,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] topicRegexps: | [ "lsst.MTCamera" ] + debug: true kafdrop: ingress: From 11c574375701c7921e51efb9ede4cb298f2464fd Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 10 Sep 2024 17:30:00 -0700 Subject: [PATCH 075/193] Run one connector replica - Run one connector replica for MTMount and M1M3 --- applications/sasquatch/values-summit.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index fd39ff3389..30b1873a25 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -302,7 +302,6 @@ telegraf-kafka-consumer: mtmount: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] @@ -324,7 +323,6 @@ telegraf-kafka-consumer: m1m3: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] From ed049c33e92cc99ba40deb2f4b5cd55a3d28e6ea Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:47:03 +0000 Subject: [PATCH 076/193] chore(deps): update helm release argo-workflows to v0.42.2 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 285c51625d..fa244f7232 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.42.1 + version: 0.42.2 repository: https://argoproj.github.io/argo-helm From 022e1f6dbde6165e657af410c916ae86781a3cac Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:47:07 +0000 Subject: [PATCH 077/193] chore(deps): update helm release 
kubernetes-replicator to v2.10.2 --- applications/kubernetes-replicator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/kubernetes-replicator/Chart.yaml b/applications/kubernetes-replicator/Chart.yaml index 27c1677bfb..335507f312 100644 --- a/applications/kubernetes-replicator/Chart.yaml +++ b/applications/kubernetes-replicator/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/mittwald/kubernetes-replicator dependencies: - name: kubernetes-replicator - version: 2.10.1 + version: 2.10.2 repository: https://helm.mittwald.de From 3a108e4489c17b4280ffe0e22aa01865bbbabac1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 12 Sep 2024 16:43:51 -0700 Subject: [PATCH 078/193] Update vo-cutouts, add schema migration support Update vo-cutouts to 3.2.0 and add support for schema migrations. --- .../gafaelfawr/templates/serviceaccount.yaml | 1 - applications/vo-cutouts/Chart.yaml | 2 +- applications/vo-cutouts/README.md | 2 + .../vo-cutouts/templates/configmap.yaml | 6 + .../templates/job-schea-update.yaml | 130 ++++++++++++++++++ .../vo-cutouts/templates/serviceaccount.yaml | 5 + applications/vo-cutouts/values.yaml | 6 + 7 files changed, 150 insertions(+), 2 deletions(-) create mode 100644 applications/vo-cutouts/templates/job-schea-update.yaml diff --git a/applications/gafaelfawr/templates/serviceaccount.yaml b/applications/gafaelfawr/templates/serviceaccount.yaml index aa35285b29..acf07b2ed2 100644 --- a/applications/gafaelfawr/templates/serviceaccount.yaml +++ b/applications/gafaelfawr/templates/serviceaccount.yaml @@ -15,7 +15,6 @@ metadata: name: "gafaelfawr-schema-update" labels: {{- include "gafaelfawr.labels" . | nindent 4 }} - annotations: annotations: helm.sh/hook: "pre-install,pre-upgrade" helm.sh/hook-delete-policy: "hook-succeeded" diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 873d9050c4..4aed5b2fe5 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -4,7 +4,7 @@ version: 1.0.0 description: "Image cutout service complying with IVOA SODA" sources: - "https://github.com/lsst-sqre/vo-cutouts" -appVersion: 3.1.0 +appVersion: 3.2.0 dependencies: - name: redis diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index 3c9f245a24..ad78a23274 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -13,6 +13,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | +| cloudsql.image.schemaUpdateTagSuffix | string | `"-alpine"` | Tag suffix to use for the proxy for schema updates | | cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL is used | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | @@ -26,6 +27,7 @@ Image cutout service complying with IVOA SODA | config.storageBucketUrl | string | None, must be set | URL for the GCS bucket for results (must start with `gs`) | | config.syncTimeout | string | `"1m"` | Timeout for results from a 
sync cutout in Safir `parse_timedelta` format | | config.timeout | int | 600 (10 minutes) | Timeout for a single cutout job in seconds | +| config.updateSchema | bool | `false` | Whether to automatically update the vo-cutouts database schema | | cutoutWorker.affinity | object | `{}` | Affinity rules for the cutout worker pod | | cutoutWorker.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for cutout workers | | cutoutWorker.image.repository | string | `"ghcr.io/lsst-sqre/vo-cutouts-worker"` | Stack image to use for cutouts | diff --git a/applications/vo-cutouts/templates/configmap.yaml b/applications/vo-cutouts/templates/configmap.yaml index b933134f6b..8a0a3a4dc4 100644 --- a/applications/vo-cutouts/templates/configmap.yaml +++ b/applications/vo-cutouts/templates/configmap.yaml @@ -2,6 +2,12 @@ apiVersion: v1 kind: ConfigMap metadata: name: vo-cutouts + {{- if .Values.config.updateSchema }} + annotations: + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "before-hook-creation" + helm.sh/hook-weight: "0" + {{- end }} labels: {{- include "vo-cutouts.labels" . | nindent 4 }} data: diff --git a/applications/vo-cutouts/templates/job-schea-update.yaml b/applications/vo-cutouts/templates/job-schea-update.yaml new file mode 100644 index 0000000000..b59461e887 --- /dev/null +++ b/applications/vo-cutouts/templates/job-schea-update.yaml @@ -0,0 +1,130 @@ +{{- if .Values.config.updateSchema -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: "vo-cutouts-schema-update" + annotations: + annotations: + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "hook-succeeded" + helm.sh/hook-weight: "1" + labels: + {{- include "vo-cutouts.labels" . | nindent 4 }} +spec: + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "vo-cutouts.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "schema-update" + vo-cutouts-redis-client: "true" + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.cloudsql.enabled }} + serviceAccountName: "vo-cutouts" + {{- else }} + automountServiceAccountToken: false + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + - name: "cloud-sql-proxy" + # Running the sidecar as normal causes it to keep running and thus + # the Pod never exits, the Job never finishes, and the hook blocks + # the sync. Have the main pod signal the sidecar by writing to a + # file on a shared emptyDir file system, and use a simple watcher + # loop in shell in the sidecar container to terminate the proxy when + # the main container finishes. + # + # Based on https://stackoverflow.com/questions/41679364/ + command: + - "/bin/sh" + - "-c" + args: + - | + /cloud_sql_proxy -ip_address_types=PRIVATE -log_debug_stdout=true -structured_logs=true -instances={{ required "cloudsql.instanceConnectionName must be specified" .Values.cloudsql.instanceConnectionName }}=tcp:5432 & + PID=$! + while true; do + if [[ -f "/lifecycle/main-terminated" ]]; then + kill $PID + exit 0 + fi + sleep 1 + done + image: "{{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }}{{ .Values.cloudsql.image.schemaUpdateTagSuffix }}" + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy | quote }} + {{- with .Values.cloudsql.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + volumeMounts: + - name: "lifecycle" + mountPath: "/lifecycle" + {{- end }} + - name: "vo-cutouts" + command: + - "/bin/sh" + - "-c" + - | + vo-cutouts update-schema + touch /lifecycle/main-terminated + env: + - name: "CUTOUT_ARQ_QUEUE_PASSWORD" + valueFrom: + secretKeyRef: + name: "vo-cutouts" + key: "redis-password" + - name: "CUTOUT_DATABASE_PASSWORD" + valueFrom: + secretKeyRef: + name: "vo-cutouts" + key: "database-password" + envFrom: + - configMapRef: + name: "vo-cutouts" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + volumeMounts: + - name: "lifecycle" + mountPath: "/lifecycle" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + restartPolicy: "Never" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: "lifecycle" + emptyDir: {} +{{- end }} diff --git a/applications/vo-cutouts/templates/serviceaccount.yaml b/applications/vo-cutouts/templates/serviceaccount.yaml index dfa2303153..c2c43cfbbd 100644 --- a/applications/vo-cutouts/templates/serviceaccount.yaml +++ b/applications/vo-cutouts/templates/serviceaccount.yaml @@ -6,5 +6,10 @@ metadata: labels: {{- include "vo-cutouts.labels" . 
| nindent 4 }} annotations: + {{- if .Values.config.updateSchema }} + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "before-hook-creation" + helm.sh/hook-weight: "0" + {{- end }} iam.gke.io/gcp-service-account: {{ required "config.serviceAccount must be set to a valid Google service account" .Values.config.serviceAccount | quote }} {{- end }} diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index f6852a7f6c..17e9ad9ba5 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -40,6 +40,9 @@ config: # @default -- 600 (10 minutes) timeout: 600 + # -- Whether to automatically update the vo-cutouts database schema + updateSchema: false + image: # -- vo-cutouts image to use for the frontend and database workers repository: "ghcr.io/lsst-sqre/vo-cutouts" @@ -93,6 +96,9 @@ cloudsql: # -- Cloud SQL Auth Proxy tag to use tag: "1.37.0" + # -- Tag suffix to use for the proxy for schema updates + schemaUpdateTagSuffix: "-alpine" + # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From fa7b66a115980a91711ae67a9ef2ca904966d0a7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 16 Sep 2024 15:43:55 -0700 Subject: [PATCH 079/193] Enable schema updates for all environments --- applications/vo-cutouts/values-idfdev.yaml | 1 + applications/vo-cutouts/values-idfint.yaml | 1 + applications/vo-cutouts/values-idfprod.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/applications/vo-cutouts/values-idfdev.yaml b/applications/vo-cutouts/values-idfdev.yaml index d65f4f8bbe..1ca562074c 100644 --- a/applications/vo-cutouts/values-idfdev.yaml +++ b/applications/vo-cutouts/values-idfdev.yaml @@ -1,6 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-dev-7696.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-dev-us-central1-output/" + upgradeSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values-idfint.yaml b/applications/vo-cutouts/values-idfint.yaml index b7e41291fd..faca2b18da 100644 --- a/applications/vo-cutouts/values-idfint.yaml +++ b/applications/vo-cutouts/values-idfint.yaml @@ -1,6 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-int-dc5d.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-int-us-central1-output/" + upgradeSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values-idfprod.yaml b/applications/vo-cutouts/values-idfprod.yaml index 461cb96fe5..53657a6e3c 100644 --- a/applications/vo-cutouts/values-idfprod.yaml +++ b/applications/vo-cutouts/values-idfprod.yaml @@ -1,6 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-stable-6994.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-stable-us-central1-output/" + updateSchema: true cloudsql: enabled: true From 744f78121e2fa7a3039fb49c645d076fdf785ebc Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 16 Sep 2024 15:49:53 -0700 Subject: [PATCH 080/193] Drop neophile support We're abandoning neophile in favor of other approaches to dependency management. Drop its GitHub Actions and tox support, and update Python and pre-commit dependencies. 
--- .github/workflows/dependencies.yaml | 35 ------- .pre-commit-config.yaml | 2 +- requirements/dev.txt | 143 +++++++++++++--------------- requirements/main.txt | 30 +++--- requirements/tox.txt | 44 ++++----- tox.ini | 7 -- 6 files changed, 106 insertions(+), 155 deletions(-) delete mode 100644 .github/workflows/dependencies.yaml diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml deleted file mode 100644 index 49b52fbb6d..0000000000 --- a/.github/workflows/dependencies.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: Dependency Update - -"on": - schedule: - - cron: "0 12 * * 1" - workflow_dispatch: {} - -jobs: - update: - runs-on: ubuntu-latest - timeout-minutes: 10 - - steps: - - uses: actions/checkout@v4 - - # Omit pre-commit updates for now until neophile looks only at releases - # so that it doesn't pick up an old helm-docs release. - - name: Run neophile - uses: lsst-sqre/run-neophile@v1 - with: - python-version: "3.12" - mode: pr - types: python - app-id: ${{ secrets.NEOPHILE_APP_ID }} - app-secret: ${{ secrets.NEOPHILE_PRIVATE_KEY }} - - - name: Report status - if: always() - uses: ravsamhq/notify-slack-action@v2 - with: - status: ${{ job.status }} - notify_when: "failure" - notification_title: "Periodic dependency update for {repo} failed" - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 99ad10ee7c..32bdfa9de8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/requirements/dev.txt b/requirements/dev.txt index 515080cba5..fd796c0977 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -446,9 +446,9 @@ greenlet==3.1.0 ; (python_full_version < '3.13' and platform_machine == 'AMD64') --hash=sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501 \ --hash=sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54 # via sqlalchemy -idna==3.8 \ - --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ - --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via # -c requirements/main.txt # requests @@ -689,9 +689,9 @@ pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523 \ --hash=sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f # via ipython -platformdirs==4.3.2 \ - --hash=sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c \ - --hash=sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617 +platformdirs==4.3.3 \ + --hash=sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5 \ + --hash=sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0 # via jupyter-core pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ @@ -1201,10 +1201,12 @@ rpds-py==0.20.0 \ # via # jsonschema # referencing -setuptools==74.1.2 \ - --hash=sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308 \ - 
--hash=sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6 - # via documenteer +setuptools==75.1.0 \ + --hash=sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2 \ + --hash=sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538 + # via + # documenteer + # sphinxcontrib-bibtex six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -1250,13 +1252,13 @@ sphinx==8.0.2 \ # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.4.0 \ - --hash=sha256:8f8281654ddf5709875429b7120d367f4edee39a131e13d5806e4f779a81bf0f \ - --hash=sha256:c9774d47e7d304cf975e073df49ebf19763dca94ac0295e7013b522b26cb18de +sphinx-autodoc-typehints==2.4.1 \ + --hash=sha256:af37abb816ebd2cf56c7a8174fd2f34d0f2f84fbf58265f89429ae107212fe6f \ + --hash=sha256:cfe410920cecf08ade046bb387b0007edb83e992de59686c62d194c762f1e45c # via documenteer -sphinx-automodapi==0.17.0 \ - --hash=sha256:4d029cb79eef29413e94ab01bb0177ebd2d5ba86e9789b73575afe9c06ae1501 \ - --hash=sha256:7ccdadad57add4aa9149d9f2bb5cf28c8f8b590280b4735b1156ea8355c423a1 +sphinx-automodapi==0.18.0 \ + --hash=sha256:022860385590768f52d4f6e19abb83b2574772d2721fb4050ecdb6e593a1a440 \ + --hash=sha256:7bf9d9a2cb67a5389c51071cfd86674ca3892ca5d5943f95de4553d6f35dddae # via documenteer sphinx-click==6.0.0 \ --hash=sha256:1e0a3c83bcb7c55497751b19d07ebe56b5d7b85eb76dd399cf9061b497adc317 \ @@ -1288,9 +1290,9 @@ sphinxcontrib-applehelp==2.0.0 \ --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 # via sphinx -sphinxcontrib-bibtex==2.6.2 \ - --hash=sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da \ - --hash=sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae +sphinxcontrib-bibtex==2.6.3 \ + --hash=sha256:7c790347ef1cb0edf30de55fc324d9782d085e89c52c2b8faafa082e08e23946 \ + --hash=sha256:ff016b738fcc867df0f75c29e139b3b2158d26a2c802db27963cb128be3b75fb # via documenteer sphinxcontrib-devhelp==2.0.0 \ --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ @@ -1335,56 +1337,47 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer -sqlalchemy==2.0.34 \ - --hash=sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22 \ - --hash=sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3 \ - --hash=sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2 \ - --hash=sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd \ - --hash=sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d \ - --hash=sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c \ - --hash=sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278 \ - --hash=sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b \ - --hash=sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021 \ - --hash=sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba \ - --hash=sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8 \ - --hash=sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b \ - 
--hash=sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e \ - --hash=sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b \ - --hash=sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a \ - --hash=sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82 \ - --hash=sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e \ - --hash=sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4 \ - --hash=sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3 \ - --hash=sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b \ - --hash=sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721 \ - --hash=sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d \ - --hash=sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a \ - --hash=sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f \ - --hash=sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc \ - --hash=sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83 \ - --hash=sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287 \ - --hash=sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434 \ - --hash=sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02 \ - --hash=sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db \ - --hash=sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8 \ - --hash=sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74 \ - --hash=sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24 \ - --hash=sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7 \ - --hash=sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0 \ - --hash=sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5 \ - --hash=sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f \ - --hash=sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c \ - --hash=sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812 \ - --hash=sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768 \ - --hash=sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8 \ - --hash=sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b \ - --hash=sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84 \ - --hash=sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796 \ - --hash=sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b \ - --hash=sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2 \ - --hash=sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06 \ - --hash=sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec \ - --hash=sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580 +sqlalchemy==2.0.35 \ + --hash=sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00 \ + --hash=sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee \ + --hash=sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6 \ + --hash=sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf \ + --hash=sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8 \ + 
--hash=sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b \ + --hash=sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc \ + --hash=sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c \ + --hash=sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5 \ + --hash=sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90 \ + --hash=sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec \ + --hash=sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71 \ + --hash=sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7 \ + --hash=sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b \ + --hash=sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468 \ + --hash=sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3 \ + --hash=sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e \ + --hash=sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff \ + --hash=sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11 \ + --hash=sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01 \ + --hash=sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62 \ + --hash=sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d \ + --hash=sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a \ + --hash=sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db \ + --hash=sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87 \ + --hash=sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e \ + --hash=sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1 \ + --hash=sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f \ + --hash=sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0 \ + --hash=sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936 \ + --hash=sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8 \ + --hash=sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f \ + --hash=sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4 \ + --hash=sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0 \ + --hash=sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c \ + --hash=sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f \ + --hash=sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60 \ + --hash=sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2 \ + --hash=sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9 \ + --hash=sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33 # via jupyter-cache stack-data==0.6.3 \ --hash=sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9 \ @@ -1490,9 +1483,9 @@ uc-micro-py==1.0.3 \ --hash=sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a \ --hash=sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5 # via linkify-it-py -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 +urllib3==2.2.3 \ + --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ + 
--hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 # via # -c requirements/main.txt # documenteer @@ -1502,7 +1495,7 @@ wcwidth==0.2.13 \ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 # via prompt-toolkit -zipp==3.20.1 \ - --hash=sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064 \ - --hash=sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b +zipp==3.20.2 \ + --hash=sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350 \ + --hash=sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29 # via importlib-metadata diff --git a/requirements/main.txt b/requirements/main.txt index 2bbd0d4a85..f8cb7df176 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -249,9 +249,9 @@ cryptography==43.0.1 \ # phalanx (pyproject.toml) # pyjwt # safir -fastapi==0.114.1 \ - --hash=sha256:1d7bbbeabbaae0acb0c22f0ab0b040f642d3093ca3645f8c876b6f91391861d8 \ - --hash=sha256:5d4746f6e4b7dff0b4f6b6c6d5445645285f662fe75886e99af7ee2d6b58bb3e +fastapi==0.114.2 \ + --hash=sha256:0adb148b62edb09e8c6eeefa3ea934e8f276dabc038c5a82989ea6346050c3da \ + --hash=sha256:44474a22913057b1acb973ab90f4b671ba5200482e7622816d79105dcece1ac5 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -283,9 +283,9 @@ hvac==2.3.0 \ --hash=sha256:1b85e3320e8642dd82f234db63253cda169a817589e823713dc5fca83119b1e2 \ --hash=sha256:a3afc5710760b6ee9b3571769df87a0333da45da05a5f9f963e1d3925a84be7d # via phalanx (pyproject.toml) -idna==3.8 \ - --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ - --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via # anyio # requests @@ -535,13 +535,13 @@ rfc3986==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 # via httpx -safir==6.3.0 \ - --hash=sha256:2fcd64bf37dd42eacedd6378341b2487cd06dbaf1f28403301b8d80f60a4fb56 \ - --hash=sha256:6ad7dad520d87d853628849ef95a348c55dbd0180ad3f15c1cf2f7f8fe32f915 +safir==6.4.0 \ + --hash=sha256:ba7af071eab0d198e6e15a2117028566f3f4237e02e2278e8bfc2633a7c68228 \ + --hash=sha256:f38c3f1d7d76d304984b572288826510e5c7a0e1f965b2eabdd7f3bace07c48a # via phalanx (pyproject.toml) -safir-logging==6.3.0 \ - --hash=sha256:491dfe85de89a3f2daa29c491a22a0551f0961444490418d91ec50c040ae16eb \ - --hash=sha256:e14754ab0bba6cfa248c3fc4cb5ca28410d97ff3965e831eab6581ed37485e79 +safir-logging==6.4.0 \ + --hash=sha256:4031a430d738b8fe5bfd29125dce6cbf4e4949879307ba4146648afa3d24cd0a \ + --hash=sha256:e2dbf0b5d9dabecd70c27bff9bf01629bf0724b05b0f0087a1fe4f45c702215f # via safir six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -581,7 +581,7 @@ uritemplate==4.1.1 \ --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e # via gidgethub -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 +urllib3==2.2.3 
\ + --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ + --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 # via requests diff --git a/requirements/tox.txt b/requirements/tox.txt index 5a2eb7afd2..0db59a8534 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -33,9 +33,9 @@ packaging==24.1 \ # pyproject-api # tox # tox-uv -platformdirs==4.3.2 \ - --hash=sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c \ - --hash=sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617 +platformdirs==4.3.3 \ + --hash=sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5 \ + --hash=sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0 # via # -c requirements/dev.txt # tox @@ -60,25 +60,25 @@ tox-uv==1.11.3 \ --hash=sha256:316f559ae5525edec12791d9e1f393e405ded5b7e7d50fbaee4726676951f49a \ --hash=sha256:d434787406ff2854600c1ceaa555519080026208cf7f65bb5d4b2d7c9c4776de # via -r requirements/tox.in -uv==0.4.9 \ - --hash=sha256:0340d2c7bf9afe0098e3301c1885de10e317232cfa346f0ac16374cee284a4cb \ - --hash=sha256:060af185481ef46ab97008cad330f3cd7a7aa1ce3d219b67d27c5a2a551ac2ea \ - --hash=sha256:1a8acc7abb2174bd3c8f5fc98345f2bb602f31b7558e37f3d23bef99ddd58dec \ - --hash=sha256:34bce9f4892130b01a7605d27bbeb71395e9b031d793123c250b79187ee307ca \ - --hash=sha256:45bf0cead2436b1977f71669e945db19990ca70a7765111fb951545815467bb6 \ - --hash=sha256:52101bc8652b4284b78fac52ed7878f3bae414bc4076c377735962666b309dde \ - --hash=sha256:5422680436f4cebef945bb2e562e01c02a4fa0a95f85d1b8010f2ee868a0b8c1 \ - --hash=sha256:55cf2522262ef663114bda5d80375ddc7f7af0d054df89426372a0d494380875 \ - --hash=sha256:566d4d7a475aacd21dbb4aba053cd4f4f52d65acdef2c83c59bcdff08756701e \ - --hash=sha256:5b66a52cb60a2882a882bc5f13afa6daf3172a54fe9fb998529d19418d5aed18 \ - --hash=sha256:630a6fe215829f734278e618c1633c2bb88ee03dc6a92ae9890fabd98ee810a9 \ - --hash=sha256:69529b6bf5de6ec8fbe8e022f5bcbaef778e76136fc37fae6ec7a8b18b3f9024 \ - --hash=sha256:71e87038fcc9f61b2d6f66c4a92354c6d0abe4baae21bb90241693f161ddeaa1 \ - --hash=sha256:8869637ea6231f66fe643be22f9334874db3496844b3d8bfd8efd4227ded3d44 \ - --hash=sha256:9c9b70f016f28cc05633b564d8690cfdb7ebac4d2210d9158819947841e00347 \ - --hash=sha256:b54a9022e9e1fdbf3ae15ef340a0d1d1847dd739df5023896aa8d97d88af1efe \ - --hash=sha256:bf834f7f360a192372d879eda86f6a1dd94195faf68154dcf7c90247098d2bb2 \ - --hash=sha256:f50cbdfbc8399e1211c580e47f42650a184541ee398af95ad29bf9a2e977baba +uv==0.4.10 \ + --hash=sha256:0784f75093a75390d8d480cc8a444516e78f08849db9a13c21791a5f651df4a1 \ + --hash=sha256:0f8b9ba4ecfbea343a00e46d509669606e55fe233d800752c4c25650473df358 \ + --hash=sha256:1b6b6c6b8cc0c4e54ab25e3b46e49d1e583e26c194572eb42bfeebf71b39cca2 \ + --hash=sha256:1ff5130b6f3af79c4e47f63db03215aed15e78cb4f1f51682af6f9949c2bcf00 \ + --hash=sha256:2ff29a2f55a697e78d787a41ab41d4b26421d200728289b88b6241d3b486c436 \ + --hash=sha256:30d1f8348a2b18e21a35c97ce42528781f242d0303881fc92fbacdcb653c8bca \ + --hash=sha256:3be73788db9ceacb94a521cf67ca5cc08bac512aef71145b904ab62a3acabdae \ + --hash=sha256:444e1cdb36d7ef103e52185f918800527c255dc369c9f90eb1f198dfa3f4d5bc \ + --hash=sha256:6ba1cc3070e5c63ce0a1421fbed28bd1b3ff520671d7badda11a501504c78394 \ + --hash=sha256:8fa510dfbbde4f8ad5cd2769568c7b0c3e867b74deaf4beabcca79e74e7550cc \ + --hash=sha256:97a1187e11a9df70d55bc577721ad4a19441cda56e4d69fb2f38d88c7650d2a0 \ + --hash=sha256:99954a94dd6c4bff8a9a963c05bc3988214ea39e7511a52fda35112e1a478447 \ + 
--hash=sha256:a9dc1f8fca5c4a2f73054d9f56c7397e9fc6ba43baefc503d6f0128d72ea662f \ + --hash=sha256:b89dfd213359a23797155ff8175e5202ed6b84aadeb20df92132127608d46acf \ + --hash=sha256:bc87d6c581cfed0979e0f5ee93383d46006c6d4a5e4eb9f43ef13bce61b50cc2 \ + --hash=sha256:bc99e6b45303f0881a8dc199f0b7ea8261dd1779e576e8477a7721ceeeaafcc7 \ + --hash=sha256:e99e3f761875962942e0743b868bd666021d5e14c3df494e820ef8f45fb88578 \ + --hash=sha256:ff9046a8c5e836e892ac7741e672ee016e92e55c659fa8195595df65a1f3accf # via tox-uv virtualenv==20.26.4 \ --hash=sha256:48f2695d9809277003f30776d155615ffc11328e6a0a8c1f0ec80188d7874a55 \ diff --git a/tox.ini b/tox.ini index 87733c00bd..0a26029174 100644 --- a/tox.ini +++ b/tox.ini @@ -45,13 +45,6 @@ deps = pre-commit commands = pre-commit run --all-files -[testenv:neophile-update] -description = Run neophile to update dependencies -skip_install = true -deps = - neophile -commands = neophile update {posargs} - [testenv:phalanx-lint-change] description = Lint application chart changes determined by Git commands = From da531a97b17c3ea72234202d7d63ce4d691288b2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 16 Sep 2024 17:17:58 -0700 Subject: [PATCH 081/193] Fix spelling of updateSchema vo-cutouts setting --- applications/vo-cutouts/values-idfdev.yaml | 1 - applications/vo-cutouts/values-idfint.yaml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/applications/vo-cutouts/values-idfdev.yaml b/applications/vo-cutouts/values-idfdev.yaml index 1ca562074c..d65f4f8bbe 100644 --- a/applications/vo-cutouts/values-idfdev.yaml +++ b/applications/vo-cutouts/values-idfdev.yaml @@ -1,7 +1,6 @@ config: serviceAccount: "vo-cutouts@science-platform-dev-7696.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-dev-us-central1-output/" - upgradeSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values-idfint.yaml b/applications/vo-cutouts/values-idfint.yaml index faca2b18da..9239f30c7d 100644 --- a/applications/vo-cutouts/values-idfint.yaml +++ b/applications/vo-cutouts/values-idfint.yaml @@ -1,7 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-int-dc5d.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-int-us-central1-output/" - upgradeSchema: true + updateSchema: true cloudsql: enabled: true From c44a5aa41579edc4e84f85f7fc629710fd6d6250 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 17 Sep 2024 09:56:12 -0400 Subject: [PATCH 082/193] Increase memory for Times Square redis We're experiencing OOMKilled with 2Gi memory limits for the Times Square redis on usdf-rsp-dev --- applications/times-square/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 1a26a01356..e6cdc61f51 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -200,10 +200,10 @@ redis: resources: limits: cpu: "1" - memory: "2Gi" + memory: "4Gi" requests: cpu: "6m" - memory: "50Mi" + memory: "1Gi" # -- Pod annotations for the Redis pod podAnnotations: {} From 0690c25921e4115f150485885e4f557deef00fdd Mon Sep 17 00:00:00 2001 From: Dan Fuchs Date: Tue, 17 Sep 2024 12:13:41 -0500 Subject: [PATCH 083/193] DM-45522: Enable strimzi-access-operator in idfdev --- applications/strimzi-access-operator/values-idfdev.yaml | 0 environments/values-idfdev.yaml | 1 + 2 files changed, 1 insertion(+) create mode 100644 applications/strimzi-access-operator/values-idfdev.yaml diff --git 
a/applications/strimzi-access-operator/values-idfdev.yaml b/applications/strimzi-access-operator/values-idfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index c9b76b5e85..b0a52056de 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -29,6 +29,7 @@ applications: squareone: true sqlproxy-cross-project: true strimzi: true + strimzi-access-operator: true tap: true telegraf: true telegraf-ds: true From 8342bd28af7ea7ff2c1b0c0499b985a0d72c6ff9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 17 Sep 2024 17:09:03 -0700 Subject: [PATCH 084/193] Enable OpenID Connect on idfprod Enable the Gafaelfawr OpenID Connect server on idfprod and configure it with a data rights mapping. Add documentation for how to add new OpenID Connect clients. --- applications/gafaelfawr/values-idfprod.yaml | 8 ++ .../gafaelfawr/add-oidc-client.rst | 102 ++++++++++++++++++ docs/applications/gafaelfawr/index.rst | 3 +- 3 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 docs/applications/gafaelfawr/add-oidc-client.rst diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index ef48fe0314..f9148ef05d 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -28,6 +28,14 @@ config: firestore: project: "rsp-firestore-stable-e8eb" + # This environment provides authentication services to IDACs. + oidcServer: + enabled: true + dataRightsMapping: + g_users: + - "dp0.2" + - "dp0.3" + # Support generating user metadata for CADC authentication code. cadcBaseUuid: "5f0eb655-0e72-4948-a6a5-a94c0be9019f" diff --git a/docs/applications/gafaelfawr/add-oidc-client.rst b/docs/applications/gafaelfawr/add-oidc-client.rst new file mode 100644 index 0000000000..013319b66c --- /dev/null +++ b/docs/applications/gafaelfawr/add-oidc-client.rst @@ -0,0 +1,102 @@ +############################# +Add new OpenID Connect client +############################# + +Gafaelfawr can also serve as an OpenID Connect server, allowing third-party applications running inside Phalanx and OpenID Connect clients outside of Phalanx environments to authenticate users in the same way that the Science Platform does. + +Each OpenID Connect client of Gafaelfawr must be pre-registered and assigned a ``client_id`` and password. +To complete an authentication, the client must authenticate with that ``client_id`` and password. +See `the Gafaelfawr documentation `__. + +This page describes how to register a new client of Gafaelfawr. +You will need the following information: + +* The Phalanx environment to which you'll be adding the new client. +* A short, human-readable name of the new client you're adding. +* The return URL to which the user will be sent after authentication. + +.. note:: + + The instructions here are specific to SQuaRE-managed Phalanx environments. + For other environments, you can update the ``oidc-server-secrets`` Gafaelfawr secret key however you maintain static secrets. + +Add secret +========== + +OpenID Connect clients are configured in the ``oidc-server-secrets`` key of the ``gafaelfawr`` secret. +The value of this key is, unfortunately, a JSON representation of all of the clients. +We currently maintain two parallel records of the clients, one in a structured 1Password secret that is not currently used, and separately in the ``gafaelfawr`` secret. 
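For concreteness, a minimal sketch of what that JSON representation looks like with a single registered client is shown below; every value is an illustrative placeholder following the format described in the registration steps that follow, not a real credential.

.. code-block:: json

   [
     {
       "id": "de5dd2c1fbf648e11d50b6cf3aa72277.clients.data.lsst.cloud",
       "password": "<random 20-character password>",
       "return_uri": "https://example.org/oauth2/callback"
     }
   ]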
+The goal is to eventually add automation to Phalanx to generate the latter from the former. + +#. Open 1Password. + Go to the 1Password vault for static secrets for the Phalanx environment where you want to add an OpenID Connect client. + +#. Create or edit an item named ``oidc-clients``. + If it doesn't already exist, create it as an item of type :menuselection:`Server`. + +#. Add a new section for the new client. + Set the section title to a short, human-readable name for the OpenID Connect client. + This name should be enough to tell someone looking at this secret what this client is used for. + +#. Add a text field to the new section. + Change the label to ``id``. + Change the contents to :samp:`{random-id}.clients.{fqdn}` where the random ID is the results of ``os.urandom(16).hex()`` in Python and the FQDN is the FQDN of the environment. + For example, ``de5dd2c1fbf648e11d50b6cf3aa72277.clients.data.lsst.cloud``. + +#. Add a password field to the new section, leaving the label as ``password``. + You can let 1Password generate a random 20-character password if you want, or generate one of equivalent entropy however you choose. + +#. Add a final text field to the new section. + Change the label to ``return_uri``. + Set the value to the return URL of the client. + This should be provided by the OpenID Connect client and will be the URL to which the user is sent after authentication. + +#. Now, you will need to copy this data into the ``gafaelfawr`` secret under the ``oidc-server-secrets`` key, creating that key if it doesn't already exist. + Unfortunately, you currently have to construct the JSON by hand. + The value of this key should be a JSON-encoded list of objects, and each object should have keys ``id``, ``password``, and ``return_uri`` with the information above. + Be sure to include all the clients, not just the new one that you're adding. + +Share the secret with the client +================================ + +You now need to convey the ``client_id`` (the ``id`` value above) and the ``client_secret`` (the ``password`` value above) to the OpenID Connect client. +They will need to configure their client software to use that ``client_id`` and ``client_secret`` whenever performing an OpenID Connect authentication. + +The easiest way to do this is often to create a separate 1Password secret and share it with the client. + +.. warning:: + + **DO NOT SHARE THE SECRETS CREATED ABOVE.** + The client should not have access to the ``oidc-clients`` or ``gafaelfawr`` secrets. + +#. Go to the SQuaRE vault and create a new secret. + Use a name like ``Gafaelfawr OIDC``, replacing ```` with a *short* human-readable name for the client. + Use the :menuselection:`Server` item type. + +#. Add the information above. + It's best to call the fields ``client_id``, ``client_secret``, and ``return_uri``, since those are the field names in the OpenID Connect standard and therefore what is usually used in software documentation. + Enter the same information as above. + +When sharing with someone who is managing multiple related clients, feel free to put all of the secrets in the same 1Password item in separate sections. + +Now, you can create a one-time 1Password link for this secret and share it with the user in Slack or via email. + +Configure Gafaelfawr +==================== + +If this is the first OpenID Connect client for Gafaelfawr, you will need to enable OpenID Connect server support. 
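As a concrete illustration, the stanza added to ``applications/gafaelfawr/values-idfprod.yaml`` earlier in this patch enables the server and supplies a data rights mapping; a new environment would need an equivalent block in its own :file:`values-{environment}.yaml` (the group name and data release names shown here are the idfprod values, not defaults).

.. code-block:: yaml

   config:
     oidcServer:
       enabled: true
       dataRightsMapping:
         g_users:
           - "dp0.2"
           - "dp0.3"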
+Do this by setting ``config.oidcServer.enabled`` to true in the Gafaelfawr :file:`values-{environment}.yaml` file. +See `the Gafaelfawr documentation `__ for more details. + +If the purpose of this OpenID Connect client is to provide services to an IDAC or another external client that may need data rights information (see :dmtn:`253`), ensure the configuration of the Gafaelfawr OpenID Connect server is correct and has a ``dataRightsMapping`` setting. +See `the Gafaelfawr documentation `__ for more information. + +Then, whether or not you needed to make configuration changes, you will need to sync secrets for this environment. +Follow the normal process (:doc:`/admin/sync-secrets`) to do that. + +Finally, you will need to restart Gafaelfawr to pick up the new secret. +Do this by selecting :menuselection:`Restart` on the deployment in Argo CD (see :ref:`branch-deploy-restart`). + +.. note:: + + Since this requires a Gafaelfawr restart, and since you are changing a secret that contains manually-formatted JSON that is prone to syntax errors that will prevent Gafaelfawr from starting, you will normally want to do this during a maintenance window for a production environment. diff --git a/docs/applications/gafaelfawr/index.rst b/docs/applications/gafaelfawr/index.rst index 93546861eb..f921f7e1f2 100644 --- a/docs/applications/gafaelfawr/index.rst +++ b/docs/applications/gafaelfawr/index.rst @@ -8,7 +8,7 @@ Gafaelfawr provides authentication and identity management services for the Rubi It is primarily used as an NGINX ``auth_request`` handler configured via annotations on the ``Ingress`` resources of Science Platform services. In that role, it requires a user have the required access scope to use that service, rejects users who do not have that scope, and redirects users who are not authenticated to the authentication process. -Gafaelfawr supports authentication via either OpenID Connect (often through CILogon_ or GitHub). +Gafaelfawr supports authentication via either OpenID Connect (often through CILogon_) or GitHub. Gafaelfawr also provides a token management API and (currently) UI for users of the Science Platform. @@ -24,6 +24,7 @@ Guides bootstrap manage-schema recreate-token + add-oidc-client github-organizations troubleshoot values From 57cb3da173f9c27d52d7fec3e3e617d4c1bbb675 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 9 Aug 2024 11:42:53 -0700 Subject: [PATCH 085/193] Remove float support from Prompt Processing timeouts. Gunicorn and Knative both require that all timeouts be integers, so it does not make sense to force floating-point math. --- charts/prompt-proto-service/templates/prompt-proto-service.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index 690c5b34aa..326a3e523f 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -47,7 +47,7 @@ spec: - name: WORKER_GRACE_PERIOD value: {{ .Values.worker.grace_period | toString | quote }} {{- /* Knative not configured for timeouts longer than 1200 seconds, and shouldn't need to be. 
*/ -}} - {{- $knative_timeout := minf 1200 (addf (mulf 2 (coalesce .Values.worker.timeout 600)) .Values.knative.extraTimeout) }} + {{- $knative_timeout := min 1200 (add (mul 2 (coalesce .Values.worker.timeout 600)) .Values.knative.extraTimeout) }} - name: RUBIN_INSTRUMENT value: {{ .Values.instrument.name }} - name: PREPROCESSING_PIPELINES_CONFIG From c368d22f7c0e6b354f256b13871e0f2634760d25 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 9 Aug 2024 12:06:51 -0700 Subject: [PATCH 086/193] Support multiple workers per Prompt Processing container. The existing `containerConcurrency` flag is now exposed in the container as an environment variable, and can be used to configure workers. I've kept the existing resource requests as per-pod, not per-worker, because managing units like "8Gi" in the template would become messy. --- .../prompt-proto-service-hsc-gpu/README.md | 15 ++++++++------- .../prompt-proto-service-hsc-gpu/values.yaml | 15 ++++++++++----- applications/prompt-proto-service-hsc/README.md | 15 ++++++++------- applications/prompt-proto-service-hsc/values.yaml | 15 ++++++++++----- .../prompt-proto-service-latiss/README.md | 15 ++++++++------- .../prompt-proto-service-latiss/values.yaml | 15 ++++++++++----- .../prompt-proto-service-lsstcam/README.md | 15 ++++++++------- .../prompt-proto-service-lsstcam/values.yaml | 15 ++++++++++----- .../prompt-proto-service-lsstcomcam/README.md | 15 ++++++++------- .../prompt-proto-service-lsstcomcam/values.yaml | 15 ++++++++++----- .../prompt-proto-service-lsstcomcamsim/README.md | 15 ++++++++------- .../values.yaml | 15 ++++++++++----- charts/prompt-proto-service/README.md | 14 +++++++------- .../templates/prompt-proto-service.yaml | 2 ++ charts/prompt-proto-service/values.yaml | 12 +++++++----- 15 files changed, 124 insertions(+), 84 deletions(-) diff --git a/applications/prompt-proto-service-hsc-gpu/README.md b/applications/prompt-proto-service-hsc-gpu/README.md index b97ddaa42a..76ce7c399a 100644 --- a/applications/prompt-proto-service-hsc-gpu/README.md +++ b/applications/prompt-proto-service-hsc-gpu/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. 
Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `true` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `1` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `1` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-hsc-gpu/values.yaml b/applications/prompt-proto-service-hsc-gpu/values.yaml index b8cc85249d..c838c1475a 100644 --- a/applications/prompt-proto-service-hsc-gpu/values.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: true - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 1 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index 1d6c810a2c..a463a85160 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). 
| | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 3f4b799c67..1361c25215 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 17da7029ab..579207cc66 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `6` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). 
| | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 9768a1c05d..5b82a11fed 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 20834485da..419a466c0d 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. 
| @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. 
| Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 6221360a93..c0d79823c9 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index ca625a5b66..71a9b5713d 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. 
| +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). 
| +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 67fc0978a9..83d6a9616b 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcomcamsim/README.md b/applications/prompt-proto-service-lsstcomcamsim/README.md index 55d6b814c6..0bf22395e9 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/README.md +++ b/applications/prompt-proto-service-lsstcomcamsim/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| | prompt-proto-service.cache.patchesPerImage | int | `16` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `6` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,16 +33,16 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"ops_rehersal_prep_2k_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. 
| -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | diff --git a/applications/prompt-proto-service-lsstcomcamsim/values.yaml b/applications/prompt-proto-service-lsstcomcamsim/values.yaml index 47815b63cf..ae5879d20a 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values.yaml @@ -121,21 +121,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -149,6 +151,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 5f3f2efadb..03390726d6 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -36,16 +36,16 @@ Event-driven processing of camera images | instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits' raws. 
Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | instrument.skymap | string | `""` | Skymap to use with the instrument | -| knative.cpuLimit | int | `1` | The maximum cpu cores. | -| knative.cpuRequest | int | `1` | The cpu cores requested. | -| knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | knative.gpu | bool | `false` | GPUs enabled. | -| knative.gpuRequest | int | `0` | The number of GPUs to request. | +| knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, startup timeout is ignored. | | logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). 
| | nameOverride | string | `""` | Override the base name for resources | diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index 326a3e523f..841be47dcb 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -40,6 +40,8 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy | quote }} name: user-container env: + - name: WORKER_COUNT + value: {{ .Values.containerConcurrency | toString | quote }} - name: WORKER_RESTART_FREQ value: {{ .Values.worker.restart | toString | quote }} - name: WORKER_TIMEOUT diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 7751ab89ea..954c50e7d1 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -124,21 +124,23 @@ sasquatch: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). From d506dc23ddb680fe17036d0f74753b19bedd1321 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 18 Sep 2024 10:32:06 -0700 Subject: [PATCH 087/193] afausti dev alert-stream-broker rbac --- applications/argocd/values-usdfdev-alert-stream-broker.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfdev-alert-stream-broker.yaml b/applications/argocd/values-usdfdev-alert-stream-broker.yaml index 482984f9b7..8298470022 100644 --- a/applications/argocd/values-usdfdev-alert-stream-broker.yaml +++ b/applications/argocd/values-usdfdev-alert-stream-broker.yaml @@ -33,6 +33,7 @@ argo-cd: g, smart@slac.stanford.edu, role:admin g, ebellm@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin + g, afausti@slac.stanford.edu, role:admin scopes: "[email]" server: From 9102bc2458431ec36846a1466b23b95c709b6bca Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 17 Sep 2024 17:33:16 -0400 Subject: [PATCH 088/193] Create KafkaUser for templatebot - Make consumer group a prefix rule because faststream seems to need each consumer to have a different group ID. 
- Drop access to unneeded topics --- .../templates/templatebot-user.yaml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 applications/sasquatch/charts/square-events/templates/templatebot-user.yaml diff --git a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml new file mode 100644 index 0000000000..fb46b65e2b --- /dev/null +++ b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: templatebot + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + template: + secret: + metadata: + annotations: + replicator.v1.mittwald.de/replication-allowed: "true" + replicator.v1.mittwald.de/replication-allowed-namespaces: "templatebot" + authentication: + type: tls + authorization: + type: simple + acls: + - resource: + type: group + name: "templatebot" + patternType: prefix + operations: + - "Read" + host: "*" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.app.mention" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.message.im" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" From f55be4988f72e58ec8c5318f466e3a92898a1cf9 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 17 Sep 2024 17:34:01 -0400 Subject: [PATCH 089/193] Add Phalanx app for templatebot This adds [templatebot](https://github.com/lsst-sqre/templatebot) into Phalanx for deployment with the modern Roundtable clusters. Templatebot works with the Squarebot message bus, so it works similarly to apps like Unfurlbot. 
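
For context, here is a minimal, illustrative sketch of the consumer side. The handler and group-ID names below are made up and the real templatebot wiring may differ; it only shows how a FastStream service subscribes to the two Squarebot topics with a distinct consumer group per subscriber, which is why the KafkaUser grants group access by prefix rather than by a single literal name.

```python
# Illustrative sketch only -- not the actual templatebot code. Handler and
# group-ID names are made up; broker settings normally come from the
# KAFKA_* environment variables injected by the chart.
import os

from faststream import FastStream
from faststream.kafka import KafkaBroker

broker = KafkaBroker(os.environ.get("KAFKA_BOOTSTRAP_SERVERS", "localhost:9092"))
app = FastStream(broker)


# Each subscriber uses its own group ID under the "templatebot" prefix, so
# the KafkaUser ACL for consumer groups must be a prefix rule, not a literal.
@broker.subscriber(
    "lsst.square-events.squarebot.slack.app.mention",
    group_id="templatebot-app-mention",
)
async def handle_app_mention(message: dict) -> None:
    ...  # react to @-mentions of the bot


@broker.subscriber(
    "lsst.square-events.squarebot.slack.message.im",
    group_id="templatebot-message-im",
)
async def handle_direct_message(message: dict) -> None:
    ...  # react to direct messages
```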
--- applications/templatebot/.helmignore | 23 ++++ applications/templatebot/Chart.yaml | 8 ++ applications/templatebot/README.md | 30 +++++ applications/templatebot/secrets.yaml | 26 ++++ .../templatebot/templates/_helpers.tpl | 26 ++++ .../templatebot/templates/configmap.yaml | 13 ++ .../templatebot/templates/deployment.yaml | 111 ++++++++++++++++++ .../templatebot/templates/kafkaaccess.yaml | 14 +++ .../templatebot/templates/networkpolicy.yaml | 21 ++++ .../templatebot/templates/service.yaml | 15 +++ .../templatebot/templates/vaultsecret.yaml | 9 ++ .../templatebot/values-roundtable-dev.yaml | 5 + .../templatebot/values-roundtable-prod.yaml | 0 applications/templatebot/values.yaml | 70 +++++++++++ docs/applications/roundtable.rst | 1 + docs/applications/templatebot/index.rst | 16 +++ docs/applications/templatebot/values.md | 12 ++ environments/README.md | 1 + .../applications/roundtable/templatebot.yaml | 34 ++++++ environments/values-roundtable-dev.yaml | 1 + environments/values.yaml | 3 + 21 files changed, 439 insertions(+) create mode 100644 applications/templatebot/.helmignore create mode 100644 applications/templatebot/Chart.yaml create mode 100644 applications/templatebot/README.md create mode 100644 applications/templatebot/secrets.yaml create mode 100644 applications/templatebot/templates/_helpers.tpl create mode 100644 applications/templatebot/templates/configmap.yaml create mode 100644 applications/templatebot/templates/deployment.yaml create mode 100644 applications/templatebot/templates/kafkaaccess.yaml create mode 100644 applications/templatebot/templates/networkpolicy.yaml create mode 100644 applications/templatebot/templates/service.yaml create mode 100644 applications/templatebot/templates/vaultsecret.yaml create mode 100644 applications/templatebot/values-roundtable-dev.yaml create mode 100644 applications/templatebot/values-roundtable-prod.yaml create mode 100644 applications/templatebot/values.yaml create mode 100644 docs/applications/templatebot/index.rst create mode 100644 docs/applications/templatebot/values.md create mode 100644 environments/templates/applications/roundtable/templatebot.yaml diff --git a/applications/templatebot/.helmignore b/applications/templatebot/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/templatebot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/templatebot/Chart.yaml b/applications/templatebot/Chart.yaml new file mode 100644 index 0000000000..c8a3e6c9b1 --- /dev/null +++ b/applications/templatebot/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: "tickets-DM-43699" +description: Create new projects +name: templatebot +sources: + - https://github.com/lsst-sqre/templatebot +type: application +version: 1.0.0 diff --git a/applications/templatebot/README.md b/applications/templatebot/README.md new file mode 100644 index 0000000000..c743d3c467 --- /dev/null +++ b/applications/templatebot/README.md @@ -0,0 +1,30 @@ +# templatebot + +Create new projects + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the templatebot deployment pod | +| config.logLevel | string | `"INFO"` | Logging level | +| config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | +| config.pathPrefix | string | `"/templatebot"` | URL path prefix | +| config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | +| config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the templatebot image | +| image.repository | string | `"ghcr.io/lsst-sqre/templatebot"` | Image to use in the templatebot deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the templatebot deployment pod | +| podAnnotations | object | `{}` | Annotations for the templatebot deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | See `values.yaml` | Resource limits and requests for the templatebot deployment pod | +| tolerations | list | `[]` | Tolerations for the templatebot deployment pod | diff --git a/applications/templatebot/secrets.yaml b/applications/templatebot/secrets.yaml new file mode 100644 index 0000000000..7e672c9ecf --- /dev/null +++ b/applications/templatebot/secrets.yaml @@ -0,0 +1,26 @@ +TEMPLATEBOT_GITHUB_APP_ID: + description: >- + The ID of the GitHub App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_ID +TEMPLATEBOT_GITHUB_APP_PRIVATE_KEY: + description: >- + The private key for the GitHub App shared by all Squarebot services. + onepassword: + encoded: true + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_PRIVATE_KEY +TEMPLATEBOT_SLACK_APP_ID: + description: >- + The ID of the Slack App shared by all Squarebot services. 
+ copy: + application: squarebot + key: SQUAREBOT_SLACK_APP_ID +TEMPLATEBOT_SLACK_TOKEN: + description: >- + The Slack bot user oauth token for the Slack App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_SLACK_TOKEN diff --git a/applications/templatebot/templates/_helpers.tpl b/applications/templatebot/templates/_helpers.tpl new file mode 100644 index 0000000000..22ab8421e4 --- /dev/null +++ b/applications/templatebot/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "templatebot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "templatebot.labels" -}} +helm.sh/chart: {{ include "templatebot.chart" . }} +{{ include "templatebot.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "templatebot.selectorLabels" -}} +app.kubernetes.io/name: "templatebot" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/templatebot/templates/configmap.yaml b/applications/templatebot/templates/configmap.yaml new file mode 100644 index 0000000000..81782fd7e0 --- /dev/null +++ b/applications/templatebot/templates/configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . | nindent 4 }} +data: + TEMPLATEBOT_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + TEMPLATEBOT_ENVIRONMENT_URL: {{ .Values.global.baseUrl | quote }} + TEMPLATEBOT_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} + TEMPLATEBOT_PROFILE: {{ .Values.config.logProfile | quote }} + TEMPLATEBOT_APP_MENTION_TOPIC: {{ .Values.config.topics.slackAppMention | quote }} + TEMPLATEBOT_MESSAGE_IM_TOPIC: {{ .Values.config.topics.slackMessageIm | quote }} diff --git a/applications/templatebot/templates/deployment.yaml b/applications/templatebot/templates/deployment.yaml new file mode 100644 index 0000000000..79888b1aff --- /dev/null +++ b/applications/templatebot/templates/deployment.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "templatebot.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "templatebot.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + envFrom: + - configMapRef: + name: "templatebot" + env: + # Writeable directory for concatenating certs. See "tmp" volume. 
+ - name: "KAFKA_CERT_TEMP_DIR" + value: "/tmp/kafka_certs" + - name: "KAFKA_SECURITY_PROTOCOL" + value: "SSL" + # From KafkaAccess + - name: "KAFKA_BOOTSTRAP_SERVERS" + valueFrom: + secretKeyRef: + name: templatebot-kafka + key: "bootstrapServers" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/kafkacluster/ca.crt" + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/kafkauser/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/kafkauser/user.key" + # From Vault secrets + - name: "TEMPLATEBOT_SLACK_APP_ID" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_SLACK_APP_ID" + - name: "TEMPLATEBOT_SLACK_TOKEN" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_SLACK_TOKEN" + volumeMounts: + - name: "kafka" + mountPath: "/etc/kafkacluster/ca.crt" + subPath: "ssl.truststore.crt" # CA cert from the Kafka cluster + - name: "kafka" + mountPath: "/etc/kafkauser/user.crt" + subPath: "ssl.keystore.crt" # User cert from the Kafka cluster signed by the clients' CA + - name: "kafka" + mountPath: "/etc/kafkauser/user.key" + subPath: "ssl.keystore.key" # private key for the consuming client + - name: "tmp" + mountPath: "/tmp/kafka_certs" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + volumes: + - name: "kafka" + secret: + secretName: templatebot-kafka + - name: "templatebot" + secret: + secretName: "templatebot" + - name: "tmp" + emptyDir: {} diff --git a/applications/templatebot/templates/kafkaaccess.yaml b/applications/templatebot/templates/kafkaaccess.yaml new file mode 100644 index 0000000000..8ca9095ac8 --- /dev/null +++ b/applications/templatebot/templates/kafkaaccess.yaml @@ -0,0 +1,14 @@ +apiVersion: access.strimzi.io/v1alpha1 +kind: KafkaAccess +metadata: + name: templatebot-kafka +spec: + kafka: + name: sasquatch + namespace: sasquatch + listener: tls + user: + kind: KafkaUser + apiGroup: kafka.strimzi.io + name: templatebot + namespace: sasquatch diff --git a/applications/templatebot/templates/networkpolicy.yaml b/applications/templatebot/templates/networkpolicy.yaml new file mode 100644 index 0000000000..ca1c1e87a1 --- /dev/null +++ b/applications/templatebot/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "templatebot" +spec: + podSelector: + matchLabels: + {{- include "templatebot.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. 
+ - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/templatebot/templates/service.yaml b/applications/templatebot/templates/service.yaml new file mode 100644 index 0000000000..2ad67bccf8 --- /dev/null +++ b/applications/templatebot/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "templatebot.selectorLabels" . | nindent 4 }} diff --git a/applications/templatebot/templates/vaultsecret.yaml b/applications/templatebot/templates/vaultsecret.yaml new file mode 100644 index 0000000000..defc7709fe --- /dev/null +++ b/applications/templatebot/templates/vaultsecret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: templatebot + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/templatebot" + type: Opaque diff --git a/applications/templatebot/values-roundtable-dev.yaml b/applications/templatebot/values-roundtable-dev.yaml new file mode 100644 index 0000000000..91a3f6a1c6 --- /dev/null +++ b/applications/templatebot/values-roundtable-dev.yaml @@ -0,0 +1,5 @@ +image: + pullPolicy: Always + +config: + logLevel: "DEBUG" diff --git a/applications/templatebot/values-roundtable-prod.yaml b/applications/templatebot/values-roundtable-prod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/templatebot/values.yaml b/applications/templatebot/values.yaml new file mode 100644 index 0000000000..cf65f9bab7 --- /dev/null +++ b/applications/templatebot/values.yaml @@ -0,0 +1,70 @@ +# Default values for templatebot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the templatebot deployment + repository: "ghcr.io/lsst-sqre/templatebot" + + # -- Pull policy for the templatebot image + pullPolicy: "IfNotPresent" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: null + +config: + # -- Logging level + logLevel: "INFO" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "production" + + # -- URL path prefix + pathPrefix: "/templatebot" + + topics: + # -- Kafka topic name for the Slack `app_mention` events + slackAppMention: "lsst.square-events.squarebot.slack.app.mention" + + # -- Kafka topic name for the Slack `message.im` events (direct message channels) + slackMessageIm: "lsst.square-events.squarebot.slack.message.im" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the templatebot deployment pod +affinity: {} + +# -- Node selection rules for the templatebot deployment pod +nodeSelector: {} + +# -- Annotations for the templatebot deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the templatebot deployment pod +# @default -- See `values.yaml` +resources: {} + +# -- Tolerations for the templatebot deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null diff --git a/docs/applications/roundtable.rst b/docs/applications/roundtable.rst index df4f559bbd..8d3ecce818 100644 --- a/docs/applications/roundtable.rst +++ b/docs/applications/roundtable.rst @@ -19,6 +19,7 @@ Argo CD project: ``roundtable`` ook/index sqrbot-sr/index squarebot/index + templatebot/index unfurlbot/index vault/index diff --git a/docs/applications/templatebot/index.rst b/docs/applications/templatebot/index.rst new file mode 100644 index 0000000000..9b2f2ce3a4 --- /dev/null +++ b/docs/applications/templatebot/index.rst @@ -0,0 +1,16 @@ +.. px-app:: templatebot + +################################# +templatebot — Create new projects +################################# + +.. jinja:: templatebot + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/templatebot/values.md b/docs/applications/templatebot/values.md new file mode 100644 index 0000000000..ad83245bf4 --- /dev/null +++ b/docs/applications/templatebot/values.md @@ -0,0 +1,12 @@ +```{px-app-values} templatebot +``` + +# templatebot Helm values reference + +Helm values reference table for the {px-app}`templatebot` application. + +```{include} ../../../applications/templatebot/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 100d733ea5..56c5604c22 100644 --- a/environments/README.md +++ b/environments/README.md @@ -67,6 +67,7 @@ | applications.tap | bool | `false` | Enable the tap application | | applications.telegraf | bool | `false` | Enable the telegraf application | | applications.telegraf-ds | bool | `false` | Enable the telegraf-ds application | +| applications.templatebot | bool | `false` | Enable the templatebot application | | applications.times-square | bool | `false` | Enable the times-square application | | applications.unfurlbot | bool | `false` | Enable the unfurlbot application | | applications.uws | bool | `false` | Enable the uws application. This includes the dmocps control system application. 
| diff --git a/environments/templates/applications/roundtable/templatebot.yaml b/environments/templates/applications/roundtable/templatebot.yaml new file mode 100644 index 0000000000..f0f34810ce --- /dev/null +++ b/environments/templates/applications/roundtable/templatebot.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "templatebot") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "templatebot" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "templatebot" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "templatebot" + server: "https://kubernetes.default.svc" + project: "roundtable" + source: + path: "applications/templatebot" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 1ff4738824..a11686b579 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -27,5 +27,6 @@ applications: strimzi-access-operator: true telegraf: true telegraf-ds: true + templatebot: true unfurlbot: true vault: true diff --git a/environments/values.yaml b/environments/values.yaml index e613ee06f5..fa0b156f5d 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -225,6 +225,9 @@ applications: # -- Enable the telegraf-ds application telegraf-ds: false + # -- Enable the templatebot application + templatebot: false + # -- Enable the times-square application times-square: false From df29eab47f50d0138a8282618dd2590ba4de0652 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 18 Sep 2024 16:10:55 -0700 Subject: [PATCH 090/193] Stop using fullname macro in docs The new starters no longer define a fullname macro since it's not needed for Phalanx Helm charts. Remove the last reference to it in the documentation. --- docs/developers/helm-chart/define-secrets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/developers/helm-chart/define-secrets.rst b/docs/developers/helm-chart/define-secrets.rst index ac904f1f80..ad68a7f831 100644 --- a/docs/developers/helm-chart/define-secrets.rst +++ b/docs/developers/helm-chart/define-secrets.rst @@ -136,7 +136,7 @@ A typical ``VaultSecret`` Helm template for an application looks like this (repl apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: {{ include "myapp.fullname" . }} + name: "myapp" labels: {{- include "myapp.labels" . | nindent 4 }} spec: From d26aff31dc2fba33900e5859c1730a9434b823e8 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 18 Sep 2024 17:09:08 -0700 Subject: [PATCH 091/193] Add a COmanage link to environment pages If Gafaelfawr is configured with CILogon and there's an enrollment URL, assume that it points to COmanage and extract its hostname. Add a COmanage link to the environment page for this environment. 
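As an illustration of the intended behavior (hostnames and client ID below are made-up placeholders, not a real deployment), a Gafaelfawr configuration like:

```
config:
  cilogon:
    clientId: "cilogon:/client_id/0123456789abcdef"
    enrollmentUrl: "https://id.example.org/registry/co_petitions/start/coef:6"
```

would be detected as a CILogon environment with `comanage_hostname` set to
`id.example.org` (the hostname component of the enrollment URL), and the
environment summary page would then render "(COmanage: id.example.org)" as a
link to `https://id.example.org/`.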
--- docs/environments/_summary.rst.jinja | 2 +- src/phalanx/models/environments.py | 5 +++++ src/phalanx/storage/config.py | 6 ++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index 72e40a18e8..d83a2d6051 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -5,7 +5,7 @@ * - Root domain - `{{ env.fqdn }} `__ * - Identity provider - - {{ env.gafaelfawr.provider.value }}{% if env.gafaelfawr.provider_hostname %} ({{ env.gafaelfawr.provider_hostname }}){% endif %} + - {{ env.gafaelfawr.provider.value }}{% if env.gafaelfawr.provider_hostname %} ({{ env.gafaelfawr.provider_hostname }}){% endif %}{% if env.gafaelfawr.comanage_hostname %} (COmanage: `{{ env.gafaelfawr.comanage_hostname }} `__){% endif %} {%- if env.argocd.url %} * - Argo CD - {{ env.argocd.url }} diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index ab1df52d87..bc15ff58c3 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -23,12 +23,14 @@ from .secrets import Secret __all__ = [ + "ArgoCDDetails", "ControlSystemConfig", "Environment", "EnvironmentBaseConfig", "EnvironmentConfig", "EnvironmentDetails", "GCPMetadata", + "GafaelfawrDetails", "GafaelfawrGitHubGroup", "GafaelfawrGitHubTeam", "GafaelfawrScope", @@ -467,6 +469,9 @@ class GafaelfawrDetails(BaseModel): provider_hostname: str | None = None """Hostname of upstream identity provider, if meaningful.""" + comanage_hostname: str | None = None + """Hostname of COmanage instance, if COmanage is in use.""" + scopes: list[GafaelfawrScope] = [] """Gafaelfawr scopes and their associated groups.""" diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 99b4a62966..b66e01e49d 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -786,9 +786,14 @@ def _build_gafaelfawr_details( # Determine the upstream identity provider. provider_hostname = None + comanage_hostname = None if gafaelfawr: if gafaelfawr.values["config"]["cilogon"]["clientId"]: provider = IdentityProvider.CILOGON + cilogon_config = gafaelfawr.values["config"]["cilogon"] + if cilogon_config["enrollmentUrl"]: + url = cilogon_config["enrollmentUrl"] + comanage_hostname = urlparse(url).hostname elif gafaelfawr.values["config"]["github"]["clientId"]: provider = IdentityProvider.GITHUB elif gafaelfawr.values["config"]["oidc"]["clientId"]: @@ -828,6 +833,7 @@ def _build_gafaelfawr_details( return GafaelfawrDetails( provider=provider, provider_hostname=provider_hostname, + comanage_hostname=comanage_hostname, scopes=sorted(gafaelfawr_scopes, key=lambda s: s.scope), ) From bd394e746c95db9b2d8352b87db34f0f6e0422e8 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 18 Sep 2024 17:23:59 -0700 Subject: [PATCH 092/193] Document restoring user with same UID/GID Document that UID/GID assignment is only based on username, and a user can be deleted from COmanage and then recreated with the same username and they will retain the same UID and GID and thus the same file access. --- docs/admin/troubleshooting.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 5383f06074..cb50958d6d 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -81,3 +81,17 @@ Even when you want to be prompted. 
**Solution:** Have the user go to `https://cilogin.org/me `__ and choose "Delete ALL". This will clear their remembered selection. They can they retry whatever operation they were attempting. + +User deleted from COmanage and needs to be restored +=================================================== + +**Symptoms**: In a Phalanx environment that uses CILogon and COmanage, a user was deleted from COmanage, possibly because their identity record or authentication configuration was irrevocably broken. +The user needs to be reinstated with their previously existing files. + +**Solution**: The user should create their account again and choose the same username that they used previously. +This will assign them the same UID and GID that they had previously. +Currently, we don't delete files for deleted users, so all of their files should still be intact. + +UID and GID for users is tracked in Google Filestore and is assigned solely based on the user's username. +Any user in the environment with the same username will get the same UID and GID, and UIDs and GIDs are never reused. +Therefore, the same UID and GID can be retained by keeping the same username. From d88d93a2ae7d1587195197e29fdc3ebdd7b937be Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 19 Sep 2024 11:48:56 -0500 Subject: [PATCH 093/193] Update Kafka version to 3.8.0 --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 459c2f9459..daaf651fd7 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -365,7 +365,7 @@ Rubin Observatory's telemetry service | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| strimzi-kafka.kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| strimzi-kafka.kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | strimzi-kafka.kafkaController.enabled | bool | `false` | Enable Kafka Controller | | strimzi-kafka.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | strimzi-kafka.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 4e844c02a3..fd425d5279 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -41,7 +41,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | kafkaController.enabled | bool | `false` | Enable Kafka Controller | | kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index f43fd60e4c..fa0deaa57b 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -11,7 +11,7 @@ cluster: kafka: # -- Version of Kafka to deploy - version: "3.7.1" + version: "3.8.0" # -- Number of Kafka broker replicas to run replicas: 3 From 172f4b62bf52f1ce0e01c43513e4a7684db5edd5 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 9 Sep 2024 12:06:56 -0700 Subject: [PATCH 094/193] Make sure all connector offsets are set o oldest by default --- applications/sasquatch/values-usdfprod.yaml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 4a1503f939..18ebd80615 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -146,7 +146,6 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] - offset: "newest" mtmount: enabled: true database: "efd" @@ -154,21 +153,18 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] - offset: "newest" comcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] - offset: "newest" eas: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] - offset: "newest" m1m3: enabled: true database: "efd" @@ -176,70 +172,60 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] - offset: "newest" m2: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] - offset: "newest" obssys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] - offset: "newest" ocps: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.OCPS" ] - offset: "newest" pmd: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.PMD" ] - offset: "newest" calsys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", 
"lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] - offset: "newest" mtaircompressor: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] - offset: "newest" genericcamera: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] - offset: "newest" gis: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GIS" ] - offset: "newest" lsstcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] - offset: "newest" auxtel: enabled: true database: "efd" From b6fefe55644231ee9376fef978ed6b49d57e2599 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 9 Sep 2024 13:06:43 -0700 Subject: [PATCH 095/193] Split auxtel connector There's a race condition that crashes the connector one way to alleviate this problem is splitting the topics among multiple connectors. --- applications/sasquatch/values-usdfprod.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 18ebd80615..1c35d0f145 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -231,7 +231,15 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory" ] + debug: true + auxtel2: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true latiss: enabled: true database: "efd" From 3a6e3ec13b224395c072016d31d0dcc71ded9f21 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 14 Sep 2024 13:41:02 -0700 Subject: [PATCH 096/193] Split eas connector There's a race condition that crashes the connector one way to alleviate this problem is splitting the topics among multiple connectors. 
--- applications/sasquatch/values-usdfprod.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 1c35d0f145..38204d5e30 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -164,7 +164,13 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + [ "lsst.sal.ESS" ] + eas2: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] m1m3: enabled: true database: "efd" From b232082a6412705d8342eccec765c0f1a9d3d0b2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 19 Sep 2024 11:45:44 -0500 Subject: [PATCH 097/193] Enable debug for all connectors --- applications/sasquatch/values-usdfprod.yaml | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 38204d5e30..8fbe3b0b4a 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -139,6 +139,7 @@ telegraf-kafka-consumer: timestamp_field: "timestamp" topicRegexps: | [ "lsst.backpack" ] + debug: true # CSC connectors maintel: enabled: true @@ -146,6 +147,7 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true mtmount: enabled: true database: "efd" @@ -153,24 +155,28 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] + debug: true comcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] + debug: true eas: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ESS" ] + debug: true eas2: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true m1m3: enabled: true database: "efd" @@ -178,60 +184,70 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] + debug: true m2: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true obssys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true ocps: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.OCPS" ] + debug: true pmd: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.PMD" ] + debug: true calsys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] + debug: true mtaircompressor: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ 
"lsst.sal.MTAirCompressor" ] + debug: true genericcamera: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + debug: true gis: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GIS" ] + debug: true lsstcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + debug: true auxtel: enabled: true database: "efd" @@ -252,18 +268,21 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true test: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Test" ] + debug: true lasertracker: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.LaserTracker" ] + debug: true # CCS connectors (experimental) data is being written on separate databases for now atcamera: enabled: true @@ -274,6 +293,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] topicRegexps: | [ "lsst.ATCamera" ] + debug: true cccamera: enabled: true database: "lsst.CCCamera" @@ -283,6 +303,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] topicRegexps: | [ "lsst.CCCamera" ] + debug: true mtcamera: enabled: true database: "lsst.MTCamera" @@ -292,6 +313,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] topicRegexps: | [ "lsst.MTCamera" ] + debug: true kafdrop: ingress: From 991b4ba16422e4c656c903312c536054ca8c3e4f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 19 Sep 2024 12:38:47 -0500 Subject: [PATCH 098/193] Run mtmount and m1m3 with one replica --- applications/sasquatch/values-usdfprod.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 8fbe3b0b4a..4dfe10f35f 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -151,7 +151,6 @@ telegraf-kafka-consumer: mtmount: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] @@ -180,7 +179,6 @@ telegraf-kafka-consumer: m1m3: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] From 509e3591dbf270b674e2be900423320ed866b8c0 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 19 Sep 2024 13:36:16 -0500 Subject: [PATCH 099/193] Increase readiness probe initial delay - InfluxDB Enterprise needs more time to read the shards from disk when restarting. The readiness probe was killing the data pods too early preventing it to restart. 
--- .../charts/influxdb-enterprise/templates/data-statefulset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml index fa28e08cf4..1cc01f575a 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -90,7 +90,7 @@ spec: path: /ping port: http readinessProbe: - initialDelaySeconds: 30 + initialDelaySeconds: 60 httpGet: path: /ping port: http From 99d61ef6e49df379a9f19c6d9ff86c2a39c8b48b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 19 Sep 2024 17:35:35 -0700 Subject: [PATCH 100/193] Fix naming of OpenID Connect client secrets The instructions for setting up a new OpenID Connect client secret didn't match the expected contents of that secret. Fix them to match. --- docs/applications/gafaelfawr/add-oidc-client.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/applications/gafaelfawr/add-oidc-client.rst b/docs/applications/gafaelfawr/add-oidc-client.rst index 013319b66c..e981115f8d 100644 --- a/docs/applications/gafaelfawr/add-oidc-client.rst +++ b/docs/applications/gafaelfawr/add-oidc-client.rst @@ -43,7 +43,7 @@ The goal is to eventually add automation to Phalanx to generate the latter from Change the contents to :samp:`{random-id}.clients.{fqdn}` where the random ID is the results of ``os.urandom(16).hex()`` in Python and the FQDN is the FQDN of the environment. For example, ``de5dd2c1fbf648e11d50b6cf3aa72277.clients.data.lsst.cloud``. -#. Add a password field to the new section, leaving the label as ``password``. +#. Add a password field to the new section, changing the label as ``secret``. You can let 1Password generate a random 20-character password if you want, or generate one of equivalent entropy however you choose. #. Add a final text field to the new section. @@ -53,13 +53,13 @@ The goal is to eventually add automation to Phalanx to generate the latter from #. Now, you will need to copy this data into the ``gafaelfawr`` secret under the ``oidc-server-secrets`` key, creating that key if it doesn't already exist. Unfortunately, you currently have to construct the JSON by hand. - The value of this key should be a JSON-encoded list of objects, and each object should have keys ``id``, ``password``, and ``return_uri`` with the information above. + The value of this key should be a JSON-encoded list of objects, and each object should have keys ``id``, ``secret``, and ``return_uri`` with the information above. Be sure to include all the clients, not just the new one that you're adding. Share the secret with the client ================================ -You now need to convey the ``client_id`` (the ``id`` value above) and the ``client_secret`` (the ``password`` value above) to the OpenID Connect client. +You now need to convey the ``client_id`` (the ``id`` value above) and the ``client_secret`` (the ``secret`` value above) to the OpenID Connect client. They will need to configure their client software to use that ``client_id`` and ``client_secret`` whenever performing an OpenID Connect authentication. The easiest way to do this is often to create a separate 1Password secret and share it with the client. 
From 8ed7d469f978f6994577643e60e3789a14a2ba97 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 20 Sep 2024 10:35:23 -0700 Subject: [PATCH 101/193] Run m1m3 and mtmount connectors with one replica - We optimized the connector configuration for throughput and we should run a single instance of Telegraf per connector. --- applications/sasquatch/values-base.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index f818146427..0f9e2e631c 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -147,7 +147,6 @@ telegraf-kafka-consumer: mtmount: enabled: true database: "efd" - replicaCount: 8 topicRegexps: | [ "lsst.sal.MTMount" ] eas: @@ -163,7 +162,6 @@ telegraf-kafka-consumer: m1m3: enabled: true database: "efd" - replicaCount: 8 topicRegexps: | [ "lsst.sal.MTM1M3" ] m2: From 207add1742b1cf5da929a8a861bad177253e62d4 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Wed, 11 Sep 2024 15:51:06 -0700 Subject: [PATCH 102/193] Make Butler client/server default on IDF int/dev dp02 To facilitate testing by CST and start getting stability testing from Mobu, switch the default dp02 alias on IDF dev and int to point to client/server Butler instead of DirectButler. --- applications/butler/README.md | 1 + applications/butler/templates/configmap.yaml | 18 ++++++++++++------ applications/butler/values-idfdev.yaml | 1 + applications/butler/values-idfint.yaml | 1 + applications/butler/values.yaml | 4 ++++ 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/applications/butler/README.md b/applications/butler/README.md index 73d7c17812..a3d2d49811 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -16,6 +16,7 @@ Server for Butler data abstraction service | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | | config.additionalS3ProfileName | string | No second S3 profile is available. | Profile name identifying a second S3 endpoint and set of credentials to use for accessing files in the datastore. | +| config.dp02ClientServerIsDefault | bool | `false` | True if the 'dp02' Butler repository alias should use client/server Butler. False if it should use DirectButler. | | config.dp02PostgresUri | string | No configuration file for DP02 will be generated. | Postgres connection string pointing to the registry database hosting Data Preview 0.2 data. | | config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | | config.pguser | string | Use values specified in per-repository Butler config files. | Postgres username used to connect to the Butler DB | diff --git a/applications/butler/templates/configmap.yaml b/applications/butler/templates/configmap.yaml index 3a815fc6e6..8529fa2ba0 100644 --- a/applications/butler/templates/configmap.yaml +++ b/applications/butler/templates/configmap.yaml @@ -46,11 +46,17 @@ data: # connecting to the Butler server. # # We provide both DirectButler and RemoteButler versions of dp02 because some - # users rely on functionality not yet available via RemoteButler. 
The default is currently - # DirectButler because the Community Science team has not had the opportunity to test RemoteButler, - # and RemoteButler is not available in the current "recommended" RSP image. + # users rely on functionality not yet available via RemoteButler. The default in production is + # DirectButler because RemoteButler is not available in the current recommended RSP image. + # On dev and int it is RemoteButler -- the Community Science team is testing the new system. idf-repositories.yaml: | - dp02: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/configs/dp02.yaml - dp02-direct: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/configs/dp02.yaml - dp02-remote: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/repo/dp02/butler.yaml + {{- $dp02Direct := print .Values.global.baseUrl .Values.config.pathPrefix "/configs/dp02.yaml" -}} + {{- $dp02Remote := print .Values.global.baseUrl .Values.config.pathPrefix "/repo/dp02/butler.yaml" -}} + {{- if .Values.config.dp02ClientServerIsDefault }} + dp02: {{ $dp02Remote }} + {{- else }} + dp02: {{ $dp02Direct }} + {{- end }} + dp02-direct: {{ $dp02Direct }} + dp02-remote: {{ $dp02Remote }} {{- end }} diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index e70e31b433..92cc0e6897 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -2,6 +2,7 @@ image: pullPolicy: Always config: + dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" additionalS3ProfileName: "ir2" diff --git a/applications/butler/values-idfint.yaml b/applications/butler/values-idfint.yaml index 5f16d776da..fc3fcb6a8f 100644 --- a/applications/butler/values-idfint.yaml +++ b/applications/butler/values-idfint.yaml @@ -1,4 +1,5 @@ config: + dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" repositories: diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 18086ea1c0..51ec757201 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -85,6 +85,10 @@ config: # @default -- No configuration file for DP02 will be generated. dp02PostgresUri: "" + # -- True if the 'dp02' Butler repository alias should use client/server + # Butler. False if it should use DirectButler. + dp02ClientServerIsDefault: false + # -- Postgres username used to connect to the Butler DB # @default -- Use values specified in per-repository Butler config files. pguser: "" From 2b8c4a9001b43ce261cbd44c9e9dafcbf90982c4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 20 Sep 2024 10:36:28 -0700 Subject: [PATCH 103/193] Update Telegraf image - This version (not released yet) fixes a race condition bug we found in the Telegraf Avro parser. 
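Since the fix is not yet in a released Telegraf image, the override lives in
the per-environment values file rather than in the chart defaults; the sketch
below mirrors the change that follows. Dropping the override later should let
the chart fall back to its default image, assuming no other environment-level
pin is in place:

```
telegraf-kafka-consumer:
  enabled: true
  image:
    repo: "docker.io/lsstsqre/telegraf"
    tag: "avro-mutex"   # interim build carrying the Avro parser mutex fix
```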
--- applications/sasquatch/values-base.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 0f9e2e631c..4cde0064d7 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -133,6 +133,9 @@ influxdb: telegraf-kafka-consumer: enabled: true + image: + repo: "docker.io/lsstsqre/telegraf" + tag: "avro-mutex" kafkaConsumers: auxtel: enabled: true From f0b4eb4451b2a223036dc2670134f591ac0ea82b Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 20 Sep 2024 10:36:49 -0700 Subject: [PATCH 104/193] Enable debug logs for all connectors --- applications/sasquatch/values-base.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 4cde0064d7..576b753ae3 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -142,71 +142,85 @@ telegraf-kafka-consumer: database: "efd" topicRegexps: | [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true maintel: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true mtmount: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTMount" ] + debug: true eas: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true latiss: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true m1m3: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTM1M3" ] + debug: true m2: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true obssys: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true ocps: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.OCPS" ] + debug: true test: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.Test" ] + debug: true mtaircompressor: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] + debug: true lasertracker: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.LaserTracker" ] + debug: true genericcamera: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + debug: true lsstcam: enabled: true database: "efd" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" From 84c4b4c229803ed3fd5fa022f2ab0330bd8e538b Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Sun, 22 Sep 2024 21:13:16 -0700 Subject: [PATCH 105/193] cm-service: move initial deployment from prod to dev vcluster --- .../cm-service/values-usdf-cm-dev.yaml | 31 +++++++++++++++++++ environments/values-usdf-cm-dev.yaml | 2 ++ environments/values-usdf-cm.yaml | 2 -- 3 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 applications/cm-service/values-usdf-cm-dev.yaml diff --git a/applications/cm-service/values-usdf-cm-dev.yaml 
b/applications/cm-service/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..e7b42a3f33 --- /dev/null +++ b/applications/cm-service/values-usdf-cm-dev.yaml @@ -0,0 +1,31 @@ +config: + logLevel: "INFO" + logProfile: "development" + databaseEcho: true + outputVolume: + storageClassName: "sdf-data-rubin" + subPath: "shared/campaigns/users/usdf-cm-prod" +worker: + htcondor: + config: + mountPath: "/home/lsstsvc1/stack/conda/envs/lsst-scipipe-9.0.0/etc/condor/config.d" + contents: | + CONDOR_HOST = sdfiana012.sdf.slac.stanford.edu + COLLECTOR_HOST = sdfiana012.sdf.slac.stanford.edu + SEC_CLIENT_AUTHENTICATION_METHODS = FS, FS_REMOTE + use security:recommended_v9_0 + SEC_DEFAULT_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + SEC_DAEMON_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + SEC_READ_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + FS_REMOTE_DIR = /sdf/group/rubin/services/htcondor/shared + SCHEDD_ADDRESS_FILE = /config/schedd-address + fsRemoteDir: + storageClassName: "sdf-group-rubin" + subPath: "services/htcondor/shared" + mountPath: "/sdf/group/rubin/services/htcondor/shared" + scheddAddress: + mountPath: "/config" + contents: | + <172.24.49.173:5935?addrs=172.24.49.173-5935&alias=sdfiana012.sdf.slac.stanford.edu> + $CondorVersion: 23.0.12 2024-06-13 BuildID: 739441 PackageID: 23.0.12-1 $ + $CondorPlatform: x86_64_AlmaLinux8 $ diff --git a/environments/values-usdf-cm-dev.yaml b/environments/values-usdf-cm-dev.yaml index 365566c1f2..7ca7a8afd7 100644 --- a/environments/values-usdf-cm-dev.yaml +++ b/environments/values-usdf-cm-dev.yaml @@ -10,3 +10,5 @@ applications: cert-manager: false gafaelfawr: false ingress-nginx: false + + cm-service: true diff --git a/environments/values-usdf-cm.yaml b/environments/values-usdf-cm.yaml index 3eac3a6f9d..2fb30966d6 100644 --- a/environments/values-usdf-cm.yaml +++ b/environments/values-usdf-cm.yaml @@ -10,5 +10,3 @@ applications: cert-manager: false gafaelfawr: false ingress-nginx: false - - cm-service: true From e0310aa9096188418b8477b616f461b9043f312b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 07:18:35 +0000 Subject: [PATCH 106/193] Update Helm release argo-workflows to v0.42.3 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index fa244f7232..28c6a47cac 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.42.2 + version: 0.42.3 repository: https://argoproj.github.io/argo-helm From b81297c4dbea4fd3cf8fe92c58f7a6e4d1a301c9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 07:18:40 +0000 Subject: [PATCH 107/193] Update Helm release telegraf to v1.8.54 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 407d6c1e7b..33c097cea8 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.53 + version: 1.8.54 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From a492c69081e1abb594c251013c4f6220e2610efe Mon 
Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 10:16:05 +0000 Subject: [PATCH 108/193] Update Helm release telegraf-ds to v1.1.34 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index b6e5adade2..8cb53aec89 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.33 + version: 1.1.34 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From f56eda3bf6eefb66e3fb12a69e954f9d9f65fc4f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 10:16:09 +0000 Subject: [PATCH 109/193] Update Helm release argo-cd to v7.6.1 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 81ca4bd156..56c9e07f2f 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 7.5.2 + version: 7.6.1 repository: https://argoproj.github.io/argo-helm From 6ae010f8857684a2053c02be832a5ef29ad0f280 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 15:43:00 +0000 Subject: [PATCH 110/193] Update Helm release connect to v1.16.0 --- applications/onepassword-connect/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/onepassword-connect/Chart.yaml b/applications/onepassword-connect/Chart.yaml index ea91cd2183..7cb6ff21d9 100644 --- a/applications/onepassword-connect/Chart.yaml +++ b/applications/onepassword-connect/Chart.yaml @@ -6,7 +6,7 @@ version: 1.0.0 dependencies: - name: connect - version: 1.15.1 + version: 1.16.0 repository: https://1password.github.io/connect-helm-charts/ annotations: From ed68f98695a2c9a1968ad6fd8f88d23aa6b9cf8b Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 14:25:22 -0700 Subject: [PATCH 111/193] Add ghostwriter --- applications/ghostwriter/.helmignore | 23 ++++++ applications/ghostwriter/Chart.yaml | 8 ++ applications/ghostwriter/secret.yaml | 8 ++ .../ghostwriter/templates/_helpers.tpl | 26 ++++++ .../ghostwriter/templates/configmap.yaml | 9 +++ .../ghostwriter/templates/deployment.yaml | 81 +++++++++++++++++++ .../ghostwriter/templates/ingress.yaml | 30 +++++++ .../ghostwriter/templates/networkpolicy.yaml | 21 +++++ .../ghostwriter/templates/service.yaml | 15 ++++ .../ghostwriter/templates/vault-secrets.yaml | 11 +++ applications/ghostwriter/values-idfdev.yaml | 10 +++ applications/ghostwriter/values.yaml | 73 +++++++++++++++++ docs/applications/ghostwriter/index.rst | 16 ++++ docs/applications/ghostwriter/values.md | 12 +++ docs/applications/infrastructure.rst | 1 + environments/README.md | 1 + .../infrastructure/ghostwriter.yaml | 34 ++++++++ environments/values-idfdev.yaml | 1 + environments/values.yaml | 3 + 19 files changed, 383 insertions(+) create mode 100644 applications/ghostwriter/.helmignore create mode 100644 applications/ghostwriter/Chart.yaml create mode 100644 applications/ghostwriter/secret.yaml create mode 100644 applications/ghostwriter/templates/_helpers.tpl create mode 100644 
applications/ghostwriter/templates/configmap.yaml create mode 100644 applications/ghostwriter/templates/deployment.yaml create mode 100644 applications/ghostwriter/templates/ingress.yaml create mode 100644 applications/ghostwriter/templates/networkpolicy.yaml create mode 100644 applications/ghostwriter/templates/service.yaml create mode 100644 applications/ghostwriter/templates/vault-secrets.yaml create mode 100644 applications/ghostwriter/values-idfdev.yaml create mode 100644 applications/ghostwriter/values.yaml create mode 100644 docs/applications/ghostwriter/index.rst create mode 100644 docs/applications/ghostwriter/values.md create mode 100644 environments/templates/applications/infrastructure/ghostwriter.yaml diff --git a/applications/ghostwriter/.helmignore b/applications/ghostwriter/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/ghostwriter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/ghostwriter/Chart.yaml b/applications/ghostwriter/Chart.yaml new file mode 100644 index 0000000000..0ea46c3e3d --- /dev/null +++ b/applications/ghostwriter/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: URL rewriter/personalizer +name: ghostwriter +sources: +- https://github.com/lsst-sqre/ghostwriter +type: application +version: 1.0.0 diff --git a/applications/ghostwriter/secret.yaml b/applications/ghostwriter/secret.yaml new file mode 100644 index 0000000000..e0f4154904 --- /dev/null +++ b/applications/ghostwriter/secret.yaml @@ -0,0 +1,8 @@ +slack-webhook: + description: >- + Slack web hook used to report internal errors to Slack. This secret may be + changed at any time. + if: config.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/applications/ghostwriter/templates/_helpers.tpl b/applications/ghostwriter/templates/_helpers.tpl new file mode 100644 index 0000000000..51a900690a --- /dev/null +++ b/applications/ghostwriter/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ghostwriter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ghostwriter.labels" -}} +helm.sh/chart: {{ include "ghostwriter.chart" . }} +{{ include "ghostwriter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ghostwriter.selectorLabels" -}} +app.kubernetes.io/name: "ghostwriter" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/ghostwriter/templates/configmap.yaml b/applications/ghostwriter/templates/configmap.yaml new file mode 100644 index 0000000000..151a90e1e5 --- /dev/null +++ b/applications/ghostwriter/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "ghostwriter-config" + labels: + {{- include "ghostwriter.labels" . 
| nindent 4 }} +data: + routing.yaml: |- + {{- toYaml .Values.mapping | nindent 4 }} diff --git a/applications/ghostwriter/templates/deployment.yaml b/applications/ghostwriter/templates/deployment.yaml new file mode 100644 index 0000000000..8e3ddf9fa5 --- /dev/null +++ b/applications/ghostwriter/templates/deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ghostwriter.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ghostwriter.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + env: + - name: "GHOSTWRITER_ENVIRONMENT_URL" + value: {{ .Values.global.baseUrl | quote }} + {{- if .Values.config.slackAlerts }} + - name: "GHOSTWRITER_ALERT_HOOK" + valueFrom: + secretKeyRef: + name: "ghostwriter-secret" + key: "slack-webhook" + {{- end }} + {{- if .Values.config.debug }} + - name: GHOSTWRITER_LOG_LEVEL + value: "DEBUG" + - name: GHOSTWRITER_LOGGING_PROFILE + value: "development" + {{- end }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + volumeMounts: + - name: "config" + mountPath: "/etc/ghostwriter" + readOnly: true + volumes: + - name: "config" + configMap: + name: "ghostwriter-config" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 diff --git a/applications/ghostwriter/templates/ingress.yaml b/applications/ghostwriter/templates/ingress.yaml new file mode 100644 index 0000000000..0d18079b28 --- /dev/null +++ b/applications/ghostwriter/templates/ingress.yaml @@ -0,0 +1,30 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" +template: + metadata: + name: "ghostwriter" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/ghostwriter" + pathType: "Prefix" + backend: + service: + name: "ghostwriter" + port: + number: 8080 diff --git a/applications/ghostwriter/templates/networkpolicy.yaml b/applications/ghostwriter/templates/networkpolicy.yaml new file mode 100644 index 0000000000..b4a5ecb1e5 --- /dev/null +++ b/applications/ghostwriter/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "ghostwriter" +spec: + podSelector: + matchLabels: + {{- include "ghostwriter.selectorLabels" . 
| nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/ghostwriter/templates/service.yaml b/applications/ghostwriter/templates/service.yaml new file mode 100644 index 0000000000..ced6204a96 --- /dev/null +++ b/applications/ghostwriter/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "ghostwriter.selectorLabels" . | nindent 4 }} diff --git a/applications/ghostwriter/templates/vault-secrets.yaml b/applications/ghostwriter/templates/vault-secrets.yaml new file mode 100644 index 0000000000..785ca96b8a --- /dev/null +++ b/applications/ghostwriter/templates/vault-secrets.yaml @@ -0,0 +1,11 @@ +{{- if .Values.config.slackAlerts }} +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: ghostwriter-secret + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/ghostwriter" + type: "Opaque" +{{- end }} diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml new file mode 100644 index 0000000000..b220357eef --- /dev/null +++ b/applications/ghostwriter/values-idfdev.yaml @@ -0,0 +1,10 @@ +image: + # pullPolicy: "Always" + tag: "tickets-dm-46010" +config: + slackAlerts: true + debug: true +mapping: + routes: + - source_prefix: "/tutorials/" + target: "${base_url}/nb/user/${user}/lab/tree/${path}.ipynb" diff --git a/applications/ghostwriter/values.yaml b/applications/ghostwriter/values.yaml new file mode 100644 index 0000000000..75247c6676 --- /dev/null +++ b/applications/ghostwriter/values.yaml @@ -0,0 +1,73 @@ +# Default values for ghostwriter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- ghostwriter configuration +config: + # -- Whether to send alerts and status to Slack. 
+ slackAlerts: false + + # -- If set to true, enable verbose logging and disable structured JSON + # logging + debug: false + +# -- ghostwriter URL mapping +mapping: + # routes for URL rewriting + # @default -- None; must be set for each environment + routes: [] + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the ghostwriter deployment + repository: "ghcr.io/lsst-sqre/ghostwriter" + + # -- Pull policy for the ghostwriter image + pullPolicy: "IfNotPresent" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: null + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the ghostwriter deployment pod +affinity: {} + +# -- Node selection rules for the ghostwriter deployment pod +nodeSelector: {} + +# -- Annotations for the ghostwriter deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the ghostwriter deployment pod +# @default -- See `values.yaml` +resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "10m" + memory: "128Mi" + +# -- Tolerations for the ghostwriter deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null diff --git a/docs/applications/ghostwriter/index.rst b/docs/applications/ghostwriter/index.rst new file mode 100644 index 0000000000..fc1adf2cc9 --- /dev/null +++ b/docs/applications/ghostwriter/index.rst @@ -0,0 +1,16 @@ +.. px-app:: ghostwriter + +####################################### +ghostwriter — URL rewriter/personalizer +####################################### + +.. jinja:: ghostwriter + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/ghostwriter/values.md b/docs/applications/ghostwriter/values.md new file mode 100644 index 0000000000..0f3a1e3f75 --- /dev/null +++ b/docs/applications/ghostwriter/values.md @@ -0,0 +1,12 @@ +```{px-app-values} ghostwriter +``` + +# ghostwriter Helm values reference + +Helm values reference table for the {px-app}`ghostwriter` application. + +```{include} ../../../applications/ghostwriter/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/infrastructure.rst b/docs/applications/infrastructure.rst index 158629e5e5..bb614908d1 100644 --- a/docs/applications/infrastructure.rst +++ b/docs/applications/infrastructure.rst @@ -12,6 +12,7 @@ Argo CD project: ``infrastructure`` argocd/index cert-manager/index + ghostwriter/index ingress-nginx/index gafaelfawr/index mobu/index diff --git a/environments/README.md b/environments/README.md index 56c5604c22..9ec20b4fda 100644 --- a/environments/README.md +++ b/environments/README.md @@ -21,6 +21,7 @@ | applications.fastapi-bootcamp | bool | `false` | Enable the fastapi-bootcamp application | | applications.filestore-backup | bool | `false` | Enable the filestore-backup application | | applications.gafaelfawr | bool | `true` | Enable the Gafaelfawr application. 
This is required by Phalanx since most other applications use `GafaelfawrIngress` | +| applications.ghostwriter | bool | `false` | Enable the ghostwriter application | | applications.giftless | bool | `false` | Enable the giftless application | | applications.hips | bool | `false` | Enable the HiPS application | | applications.ingress-nginx | bool | `true` | Enable the ingress-nginx application. This is required for all environments, but is still configurable because currently USDF uses an unsupported configuration with ingress-nginx deployed in a different cluster. | diff --git a/environments/templates/applications/infrastructure/ghostwriter.yaml b/environments/templates/applications/infrastructure/ghostwriter.yaml new file mode 100644 index 0000000000..5d993e0b88 --- /dev/null +++ b/environments/templates/applications/infrastructure/ghostwriter.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "ghostwriter") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "ghostwriter" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "ghostwriter" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "ghostwriter" + server: "https://kubernetes.default.svc" + project: "infrastructure" + source: + path: "applications/ghostwriter" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index b0a52056de..39c028701b 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -16,6 +16,7 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true jira-data-proxy: true mobu: true diff --git a/environments/values.yaml b/environments/values.yaml index fa0b156f5d..b7774a1d1e 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -82,6 +82,9 @@ applications: # most other applications use `GafaelfawrIngress` gafaelfawr: true + # -- Enable the ghostwriter application + ghostwriter: false + # -- Enable the giftless application giftless: false From d031befc8372556b2dad92e18a4ebe6bde5c0707 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 14:36:15 -0700 Subject: [PATCH 112/193] no slack alerts ghostwriter/idfdev --- applications/ghostwriter/values-idfdev.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index b220357eef..34ad1d9ae8 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -1,8 +1,8 @@ image: - # pullPolicy: "Always" + pullPolicy: "Always" tag: "tickets-dm-46010" config: - slackAlerts: true + # slackAlerts: true debug: true mapping: routes: From d43325d3a4e3a80e97d248506a14b6214b960827 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 14:43:55 -0700 Subject: [PATCH 113/193] Add delegated scopes --- applications/ghostwriter/templates/ingress.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/applications/ghostwriter/templates/ingress.yaml 
b/applications/ghostwriter/templates/ingress.yaml index 0d18079b28..00b1749a52 100644 --- a/applications/ghostwriter/templates/ingress.yaml +++ b/applications/ghostwriter/templates/ingress.yaml @@ -9,6 +9,16 @@ config: scopes: all: - "read:image" + delegate: + internal: + service: "ghostwriter" + scopes: + - "read:image" + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + - "write:files" template: metadata: name: "ghostwriter" From e1fbc45760d4600669878872da8a7b9637a02fea Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 14:46:31 -0700 Subject: [PATCH 114/193] Add dummy config.yaml --- applications/ghostwriter/templates/configmap.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/ghostwriter/templates/configmap.yaml b/applications/ghostwriter/templates/configmap.yaml index 151a90e1e5..ba63bffdea 100644 --- a/applications/ghostwriter/templates/configmap.yaml +++ b/applications/ghostwriter/templates/configmap.yaml @@ -7,3 +7,5 @@ metadata: data: routing.yaml: |- {{- toYaml .Values.mapping | nindent 4 }} + config.yaml: |- + # Empty: values will be taken from environment From c0107daaf130d8b296053a7fa8fdc35fd9ab2ece Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 15:13:18 -0700 Subject: [PATCH 115/193] try 'notebook' style delegation --- applications/ghostwriter/templates/ingress.yaml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/applications/ghostwriter/templates/ingress.yaml b/applications/ghostwriter/templates/ingress.yaml index 00b1749a52..1570a890a7 100644 --- a/applications/ghostwriter/templates/ingress.yaml +++ b/applications/ghostwriter/templates/ingress.yaml @@ -9,16 +9,8 @@ config: scopes: all: - "read:image" - delegate: - internal: - service: "ghostwriter" - scopes: - - "read:image" - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - - "write:files" + delegate: + notebook: {} template: metadata: name: "ghostwriter" From dfdb68a262fc170e4ea31840a2b66e53ec4b298d Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 16:05:57 -0700 Subject: [PATCH 116/193] Add path for tutorial nb rewrite --- applications/ghostwriter/values-idfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 34ad1d9ae8..2ec32258e6 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -7,4 +7,4 @@ config: mapping: routes: - source_prefix: "/tutorials/" - target: "${base_url}/nb/user/${user}/lab/tree/${path}.ipynb" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorial-notebooks/${path}.ipynb" From d51328aae6af5c7e78060c9dbb404c37415ceb95 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 3 Sep 2024 16:45:17 -0700 Subject: [PATCH 117/193] Add top-level ingresses to ghostwriter --- applications/ghostwriter/README.md | 29 +++++++++++++++ .../ghostwriter/templates/deployment.yaml | 3 +- .../templates/ingress-toplevel.yaml | 36 +++++++++++++++++++ 3 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 applications/ghostwriter/README.md create mode 100644 applications/ghostwriter/templates/ingress-toplevel.yaml diff --git a/applications/ghostwriter/README.md b/applications/ghostwriter/README.md new file mode 100644 index 0000000000..ef7ad713f4 --- /dev/null +++ b/applications/ghostwriter/README.md @@ -0,0 +1,29 @@ +# ghostwriter + +URL rewriter/personalizer + +## Source Code + +* + 
+## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the ghostwriter deployment pod | +| config | object | `{"debug":false,"slackAlerts":false}` | ghostwriter configuration | +| config.debug | bool | `false` | If set to true, enable verbose logging and disable structured JSON logging | +| config.slackAlerts | bool | `false` | Whether to send alerts and status to Slack. | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the ghostwriter image | +| image.repository | string | `"ghcr.io/lsst-sqre/ghostwriter"` | Image to use in the ghostwriter deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| mapping | object | `{"routes":[]}` | ghostwriter URL mapping | +| nodeSelector | object | `{}` | Node selection rules for the ghostwriter deployment pod | +| podAnnotations | object | `{}` | Annotations for the ghostwriter deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | See `values.yaml` | Resource limits and requests for the ghostwriter deployment pod | +| tolerations | list | `[]` | Tolerations for the ghostwriter deployment pod | diff --git a/applications/ghostwriter/templates/deployment.yaml b/applications/ghostwriter/templates/deployment.yaml index 8e3ddf9fa5..8c945dc7b4 100644 --- a/applications/ghostwriter/templates/deployment.yaml +++ b/applications/ghostwriter/templates/deployment.yaml @@ -11,8 +11,9 @@ spec: {{- include "ghostwriter.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: diff --git a/applications/ghostwriter/templates/ingress-toplevel.yaml b/applications/ghostwriter/templates/ingress-toplevel.yaml new file mode 100644 index 0000000000..b659bc7cec --- /dev/null +++ b/applications/ghostwriter/templates/ingress-toplevel.yaml @@ -0,0 +1,36 @@ +{{- $root := . -}} +{{- range $route := $root.Values.mapping.routes }} +{{- $source := $route.source_prefix | trimAll "/" }} +{{- $res_src := trimPrefix "/" $source | replace "/" "-" }} +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ghostwriter-{{ $res_src }}" +config: + baseUrl: {{ $root.Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + delegate: + notebook: {} +template: + metadata: + name: "ghostwriter-{{ $res_src }}" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: "/ghostwriter/rewrite/$1" + {{- with $root.Values.ingress.annotations }} + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" $root.Values.global.host | quote }} + http: + paths: + - path: "/({{ $source }}/.*)" + pathType: "ImplementationSpecific" + backend: + service: + name: "ghostwriter" + port: + number: 8080 +{{- end }} From 78464434850a20fcf6813bacf19ea064274bd615 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Sep 2024 12:54:47 -0700 Subject: [PATCH 118/193] Try adding a hook for ghostwriter --- applications/ghostwriter/values-idfdev.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 2ec32258e6..235801a95c 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -8,3 +8,5 @@ mapping: routes: - source_prefix: "/tutorials/" target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorial-notebooks/${path}.ipynb" + hooks: + - "ensure_running_lab" From 4ecd1f226007f3023da57e4685aa9516616609ee Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Sep 2024 15:55:51 -0700 Subject: [PATCH 119/193] Add query target for ghostwriter --- applications/ghostwriter/values-idfdev.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 235801a95c..f97559f62e 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -10,3 +10,8 @@ mapping: target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorial-notebooks/${path}.ipynb" hooks: - "ensure_running_lab" + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" From f3b772a51218f9b635e67378e3d227f1eca74497 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Sep 2024 16:27:23 -0700 Subject: [PATCH 120/193] separate multiple ingresses --- applications/ghostwriter/templates/ingress-toplevel.yaml | 1 + applications/ghostwriter/values-idfdev.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/ghostwriter/templates/ingress-toplevel.yaml b/applications/ghostwriter/templates/ingress-toplevel.yaml index b659bc7cec..fb8ebbf3b5 100644 --- a/applications/ghostwriter/templates/ingress-toplevel.yaml +++ b/applications/ghostwriter/templates/ingress-toplevel.yaml @@ -33,4 +33,5 @@ template: name: "ghostwriter" port: number: 8080 +--- {{- end }} diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index f97559f62e..3386860209 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -11,7 +11,7 @@ mapping: hooks: - "ensure_running_lab" - source_prefix: "/queries/" - target: "${base_url}/nb/user/${user}/lab/tree/queries/portal_${path}.ipynb" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" hooks: - "ensure_running_lab" - "portal_query" From 388f8bb8d8ca0eaa5c266afd5850161441c9b043 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Sep 2024 16:35:04 -0700 Subject: [PATCH 121/193] Add exec:notebook to delegated portal scope, so they can hit '/queries/query-id' --- applications/portal/templates/ingress.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/portal/templates/ingress.yaml b/applications/portal/templates/ingress.yaml index 0d7d6fc957..4edef4ab7b 100644 --- a/applications/portal/templates/ingress.yaml +++ 
b/applications/portal/templates/ingress.yaml @@ -18,6 +18,7 @@ config: - "read:image" - "read:tap" - "write:files" + - "exec:notebook" template: metadata: name: {{ include "portal.fullname" . }} From 7dd7939bf742b50e171609ed23a2bd9d2e8acad1 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 9 Sep 2024 12:02:03 -0700 Subject: [PATCH 122/193] Add tutorial-on-demand to ghostwriter --- applications/ghostwriter/values-idfdev.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 3386860209..4bd576564d 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -15,3 +15,8 @@ mapping: hooks: - "ensure_running_lab" - "portal_query" + - source_prefix: "/tutorials-on-demand/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorials-on-demand/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "tutorial_on_demand" From f0a70ee0908ef73c45d110314c8f237dc970e9c9 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Wed, 11 Sep 2024 15:54:09 -0700 Subject: [PATCH 123/193] Add system-test to ghostwriter --- applications/ghostwriter/values-idfdev.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 4bd576564d..d00bee4314 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -20,3 +20,8 @@ mapping: hooks: - "ensure_running_lab" - "tutorial_on_demand" + - source_prefix: "/system-test/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "system_test" From dd8a5600e217dbc2fbe13b0291029c5247b80869 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 11 Sep 2024 21:45:50 -0700 Subject: [PATCH 124/193] Add generic github notebook hook --- applications/ghostwriter/values-idfdev.yaml | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index d00bee4314..706121ea96 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -6,22 +6,18 @@ config: debug: true mapping: routes: - - source_prefix: "/tutorials/" - target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorial-notebooks/${path}.ipynb" - hooks: - - "ensure_running_lab" - source_prefix: "/queries/" target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" hooks: - "ensure_running_lab" - "portal_query" - - source_prefix: "/tutorials-on-demand/" - target: "${base_url}/nb/user/${user}/lab/tree/notebooks/tutorials-on-demand/${path}.ipynb" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" hooks: - "ensure_running_lab" - - "tutorial_on_demand" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook - source_prefix: "/system-test/" - target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/${path}.ipynb" - hooks: - - "ensure_running_lab" - - "system_test" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" From 62d4ebb56ce147016dd098dc7d0a144d674cc947 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 23 Sep 2024 13:37:31 
-0700 Subject: [PATCH 125/193] Move to Ghostwriter release version, enable in int/prod --- applications/ghostwriter/values-idfdev.yaml | 3 --- applications/ghostwriter/values-idfint.yaml | 17 +++++++++++++++++ applications/ghostwriter/values-idfprod.yaml | 17 +++++++++++++++++ environments/values-idfint.yaml | 1 + environments/values-idfprod.yaml | 1 + 5 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 applications/ghostwriter/values-idfint.yaml create mode 100644 applications/ghostwriter/values-idfprod.yaml diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml index 706121ea96..a2ee3d93ca 100644 --- a/applications/ghostwriter/values-idfdev.yaml +++ b/applications/ghostwriter/values-idfdev.yaml @@ -1,6 +1,3 @@ -image: - pullPolicy: "Always" - tag: "tickets-dm-46010" config: # slackAlerts: true debug: true diff --git a/applications/ghostwriter/values-idfint.yaml b/applications/ghostwriter/values-idfint.yaml new file mode 100644 index 0000000000..364d12c05b --- /dev/null +++ b/applications/ghostwriter/values-idfint.yaml @@ -0,0 +1,17 @@ +mapping: + routes: + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook + - source_prefix: "/system-test/" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" diff --git a/applications/ghostwriter/values-idfprod.yaml b/applications/ghostwriter/values-idfprod.yaml new file mode 100644 index 0000000000..364d12c05b --- /dev/null +++ b/applications/ghostwriter/values-idfprod.yaml @@ -0,0 +1,17 @@ +mapping: + routes: + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook + - source_prefix: "/system-test/" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 27a63ea967..15190999da 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -16,6 +16,7 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true mobu: true nublado: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 611286d511..af3b77877c 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -17,6 +17,7 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true mobu: true nublado: true From 4f49b48120b78fc8e78f5ee6d23084e09e462b64 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 23 Sep 2024 13:42:52 -0700 Subject: [PATCH 126/193] 
BTS: Change ack for CSC producers. --- charts/csc_shared/templates/configmap-env.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charts/csc_shared/templates/configmap-env.yaml b/charts/csc_shared/templates/configmap-env.yaml index cefb956f2c..8e7f7485f8 100644 --- a/charts/csc_shared/templates/configmap-env.yaml +++ b/charts/csc_shared/templates/configmap-env.yaml @@ -10,5 +10,6 @@ data: LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystem.kafkaTopicReplicationFactor | quote }} LSST_KAFKA_SECURITY_USERNAME: ts-salkafka LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystem.schemaRegistryUrl }} + LSST_KAFKA_PRODUCER_WAIT_ACKS: "1" S3_ENDPOINT_URL: {{ $.Values.global.controlSystem.s3EndpointUrl }} {{- end }} From ea700e3af952a5c63a039c0f1642f95931de8eb7 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 23 Sep 2024 13:46:56 -0700 Subject: [PATCH 127/193] Rename secret.yaml -> secrets.yaml (ghostwriter) --- applications/ghostwriter/{secret.yaml => secrets.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename applications/ghostwriter/{secret.yaml => secrets.yaml} (100%) diff --git a/applications/ghostwriter/secret.yaml b/applications/ghostwriter/secrets.yaml similarity index 100% rename from applications/ghostwriter/secret.yaml rename to applications/ghostwriter/secrets.yaml From f220731a27eb1421f8bf24bebfca7e884e42e901 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 23 Sep 2024 14:50:30 -0700 Subject: [PATCH 128/193] Fix context deadline exceeded error - Reducing the batch size configuration in Telegraf from 5000 to 2500 messages fixed the problem. It seems that 5000 messages * avg size of M1M3 messages was too large for a request to the InfluxDB API. --- applications/sasquatch/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 576b753ae3..a28a51be3b 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -172,6 +172,7 @@ telegraf-kafka-consumer: database: "efd" topicRegexps: | [ "lsst.sal.MTM1M3" ] + metric_batch_size: 2500 debug: true m2: enabled: true From edfa77c7b4a91d339c7f63bbd6ff9319abd07de9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 23 Sep 2024 15:11:46 -0700 Subject: [PATCH 129/193] Update Python and pre-commit dependencies --- .pre-commit-config.yaml | 2 +- requirements/dev.txt | 360 +++++++++++++++++++++------------------- requirements/main.txt | 204 +++++++++++------------ requirements/tox.txt | 74 ++++----- 4 files changed, 328 insertions(+), 312 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 32bdfa9de8..c684835a13 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.7 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/requirements/dev.txt b/requirements/dev.txt index fd796c0977..a8d900ddd8 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -378,73 +378,80 @@ graphviz==0.20.3 \ --hash=sha256:09d6bc81e6a9fa392e7ba52135a9d49f1ed62526f96499325930e87ca1b5925d \ --hash=sha256:81f848f2904515d8cd359cc611faba817598d2feaac4027b266aa3eda7b3dde5 # via diagrams -greenlet==3.1.0 ; (python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 'aarch64') 
or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64') \ - --hash=sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9 \ - --hash=sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17 \ - --hash=sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc \ - --hash=sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637 \ - --hash=sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2 \ - --hash=sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3 \ - --hash=sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6 \ - --hash=sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b \ - --hash=sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf \ - --hash=sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27 \ - --hash=sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1 \ - --hash=sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc \ - --hash=sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a \ - --hash=sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b \ - --hash=sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d \ - --hash=sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28 \ - --hash=sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303 \ - --hash=sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99 \ - --hash=sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f \ - --hash=sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7 \ - --hash=sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6 \ - --hash=sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a \ - --hash=sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc \ - --hash=sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0 \ - --hash=sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8 \ - --hash=sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a \ - --hash=sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca \ - --hash=sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b \ - --hash=sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989 \ - --hash=sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19 \ - --hash=sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6 \ - --hash=sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484 \ - --hash=sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd \ - --hash=sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25 \ - --hash=sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b \ - --hash=sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910 \ - --hash=sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0 \ - --hash=sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5 \ - --hash=sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345 \ - 
--hash=sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6 \ - --hash=sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00 \ - --hash=sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df \ - --hash=sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811 \ - --hash=sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca \ - --hash=sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8 \ - --hash=sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33 \ - --hash=sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97 \ - --hash=sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0 \ - --hash=sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b \ - --hash=sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682 \ - --hash=sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39 \ - --hash=sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64 \ - --hash=sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f \ - --hash=sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665 \ - --hash=sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f \ - --hash=sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc \ - --hash=sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d \ - --hash=sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a \ - --hash=sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0 \ - --hash=sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09 \ - --hash=sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b \ - --hash=sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491 \ - --hash=sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7 \ - --hash=sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954 \ - --hash=sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501 \ - --hash=sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54 +greenlet==3.1.1 ; (python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 'aarch64') or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64') \ + --hash=sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e \ + --hash=sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7 \ + --hash=sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01 \ + --hash=sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1 \ + --hash=sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159 \ + --hash=sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563 \ + --hash=sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83 \ + --hash=sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9 \ + --hash=sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395 \ + --hash=sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa \ + 
--hash=sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942 \ + --hash=sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1 \ + --hash=sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441 \ + --hash=sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22 \ + --hash=sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9 \ + --hash=sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0 \ + --hash=sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba \ + --hash=sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3 \ + --hash=sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1 \ + --hash=sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6 \ + --hash=sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291 \ + --hash=sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39 \ + --hash=sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d \ + --hash=sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467 \ + --hash=sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475 \ + --hash=sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef \ + --hash=sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c \ + --hash=sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511 \ + --hash=sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c \ + --hash=sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822 \ + --hash=sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a \ + --hash=sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8 \ + --hash=sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d \ + --hash=sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01 \ + --hash=sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145 \ + --hash=sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80 \ + --hash=sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13 \ + --hash=sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e \ + --hash=sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b \ + --hash=sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1 \ + --hash=sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef \ + --hash=sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc \ + --hash=sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff \ + --hash=sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120 \ + --hash=sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437 \ + --hash=sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd \ + --hash=sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981 \ + --hash=sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36 \ + --hash=sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a \ + --hash=sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798 \ + --hash=sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7 \ + --hash=sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761 \ + 
--hash=sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0 \ + --hash=sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e \ + --hash=sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af \ + --hash=sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa \ + --hash=sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c \ + --hash=sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42 \ + --hash=sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e \ + --hash=sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81 \ + --hash=sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e \ + --hash=sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617 \ + --hash=sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc \ + --hash=sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de \ + --hash=sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111 \ + --hash=sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383 \ + --hash=sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70 \ + --hash=sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6 \ + --hash=sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4 \ + --hash=sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011 \ + --hash=sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803 \ + --hash=sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79 \ + --hash=sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f # via sqlalchemy idna==3.10 \ --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ @@ -505,9 +512,9 @@ jupyter-cache==1.0.0 \ --hash=sha256:594b1c4e29b488b36547e12477645f489dbdc62cc939b2408df5679f79245078 \ --hash=sha256:d0fa7d7533cd5798198d8889318269a8c1382ed3b22f622c09a9356521f48687 # via myst-nb -jupyter-client==8.6.2 \ - --hash=sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df \ - --hash=sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f +jupyter-client==8.6.3 \ + --hash=sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419 \ + --hash=sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f # via # ipykernel # nbclient @@ -689,9 +696,9 @@ pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523 \ --hash=sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f # via ipython -platformdirs==4.3.3 \ - --hash=sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5 \ - --hash=sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via jupyter-core pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ @@ -744,104 +751,104 @@ pycparser==2.22 ; implementation_name == 'pypy' \ # via # -c requirements/main.txt # cffi -pydantic==2.9.1 \ - --hash=sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2 \ - --hash=sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612 +pydantic==2.9.2 \ + 
--hash=sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f \ + --hash=sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12 # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.23.3 \ - --hash=sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801 \ - --hash=sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec \ - --hash=sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295 \ - --hash=sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba \ - --hash=sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e \ - --hash=sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e \ - --hash=sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4 \ - --hash=sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211 \ - --hash=sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea \ - --hash=sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c \ - --hash=sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835 \ - --hash=sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d \ - --hash=sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c \ - --hash=sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c \ - --hash=sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61 \ - --hash=sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83 \ - --hash=sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb \ - --hash=sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1 \ - --hash=sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5 \ - --hash=sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690 \ - --hash=sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b \ - --hash=sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7 \ - --hash=sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70 \ - --hash=sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a \ - --hash=sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8 \ - --hash=sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd \ - --hash=sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee \ - --hash=sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1 \ - --hash=sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab \ - --hash=sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958 \ - --hash=sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5 \ - --hash=sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b \ - --hash=sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961 \ - --hash=sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c \ - --hash=sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25 \ - --hash=sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4 \ - --hash=sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4 \ - --hash=sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f \ - --hash=sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326 \ - 
--hash=sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab \ - --hash=sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8 \ - --hash=sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b \ - --hash=sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6 \ - --hash=sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8 \ - --hash=sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01 \ - --hash=sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc \ - --hash=sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d \ - --hash=sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e \ - --hash=sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b \ - --hash=sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855 \ - --hash=sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700 \ - --hash=sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a \ - --hash=sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa \ - --hash=sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541 \ - --hash=sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791 \ - --hash=sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162 \ - --hash=sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611 \ - --hash=sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef \ - --hash=sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe \ - --hash=sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5 \ - --hash=sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba \ - --hash=sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28 \ - --hash=sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa \ - --hash=sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27 \ - --hash=sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4 \ - --hash=sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b \ - --hash=sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2 \ - --hash=sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c \ - --hash=sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8 \ - --hash=sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb \ - --hash=sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c \ - --hash=sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e \ - --hash=sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305 \ - --hash=sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8 \ - --hash=sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4 \ - --hash=sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433 \ - --hash=sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45 \ - --hash=sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16 \ - --hash=sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed \ - --hash=sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0 \ - --hash=sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d \ - 
--hash=sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710 \ - --hash=sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48 \ - --hash=sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423 \ - --hash=sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf \ - --hash=sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9 \ - --hash=sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63 \ - --hash=sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5 \ - --hash=sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb +pydantic-core==2.23.4 \ + --hash=sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36 \ + --hash=sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05 \ + --hash=sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071 \ + --hash=sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327 \ + --hash=sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c \ + --hash=sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36 \ + --hash=sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29 \ + --hash=sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744 \ + --hash=sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d \ + --hash=sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec \ + --hash=sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e \ + --hash=sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e \ + --hash=sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577 \ + --hash=sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232 \ + --hash=sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863 \ + --hash=sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6 \ + --hash=sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368 \ + --hash=sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480 \ + --hash=sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2 \ + --hash=sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2 \ + --hash=sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6 \ + --hash=sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769 \ + --hash=sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d \ + --hash=sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2 \ + --hash=sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84 \ + --hash=sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166 \ + --hash=sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271 \ + --hash=sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5 \ + --hash=sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb \ + --hash=sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13 \ + --hash=sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323 \ + --hash=sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556 \ + --hash=sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665 \ + --hash=sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef \ + 
--hash=sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb \ + --hash=sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119 \ + --hash=sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126 \ + --hash=sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510 \ + --hash=sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b \ + --hash=sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87 \ + --hash=sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f \ + --hash=sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc \ + --hash=sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8 \ + --hash=sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21 \ + --hash=sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f \ + --hash=sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6 \ + --hash=sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658 \ + --hash=sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b \ + --hash=sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3 \ + --hash=sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb \ + --hash=sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59 \ + --hash=sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24 \ + --hash=sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9 \ + --hash=sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3 \ + --hash=sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd \ + --hash=sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753 \ + --hash=sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55 \ + --hash=sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad \ + --hash=sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a \ + --hash=sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605 \ + --hash=sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e \ + --hash=sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b \ + --hash=sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433 \ + --hash=sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8 \ + --hash=sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07 \ + --hash=sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728 \ + --hash=sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0 \ + --hash=sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327 \ + --hash=sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555 \ + --hash=sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64 \ + --hash=sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6 \ + --hash=sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea \ + --hash=sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b \ + --hash=sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df \ + --hash=sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e \ + --hash=sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd \ + 
--hash=sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068 \ + --hash=sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3 \ + --hash=sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040 \ + --hash=sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12 \ + --hash=sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916 \ + --hash=sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f \ + --hash=sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f \ + --hash=sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801 \ + --hash=sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231 \ + --hash=sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5 \ + --hash=sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8 \ + --hash=sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee \ + --hash=sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607 # via # -c requirements/main.txt # pydantic @@ -1252,9 +1259,9 @@ sphinx==8.0.2 \ # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.4.1 \ - --hash=sha256:af37abb816ebd2cf56c7a8174fd2f34d0f2f84fbf58265f89429ae107212fe6f \ - --hash=sha256:cfe410920cecf08ade046bb387b0007edb83e992de59686c62d194c762f1e45c +sphinx-autodoc-typehints==2.4.4 \ + --hash=sha256:940de2951fd584d147e46772579fdc904f945c5f1ee1a78c614646abfbbef18b \ + --hash=sha256:e743512da58b67a06579a1462798a6907664ab77460758a43234adeac350afbf # via documenteer sphinx-automodapi==0.18.0 \ --hash=sha256:022860385590768f52d4f6e19abb83b2574772d2721fb4050ecdb6e593a1a440 \ @@ -1338,14 +1345,19 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer sqlalchemy==2.0.35 \ + --hash=sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9 \ --hash=sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00 \ --hash=sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee \ --hash=sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6 \ + --hash=sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1 \ + --hash=sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72 \ --hash=sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf \ --hash=sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8 \ --hash=sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b \ --hash=sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc \ --hash=sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c \ + --hash=sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1 \ + --hash=sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3 \ --hash=sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5 \ --hash=sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90 \ --hash=sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec \ @@ -1355,6 +1367,7 @@ sqlalchemy==2.0.35 \ --hash=sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468 \ --hash=sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3 \ --hash=sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e \ + 
--hash=sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139 \ --hash=sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff \ --hash=sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11 \ --hash=sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01 \ @@ -1365,10 +1378,13 @@ sqlalchemy==2.0.35 \ --hash=sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87 \ --hash=sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e \ --hash=sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1 \ + --hash=sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9 \ --hash=sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f \ --hash=sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0 \ + --hash=sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44 \ --hash=sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936 \ --hash=sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8 \ + --hash=sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea \ --hash=sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f \ --hash=sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4 \ --hash=sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0 \ @@ -1465,9 +1481,9 @@ typed-ast==1.5.5 \ --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \ --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2 # via diagrams -types-pyyaml==6.0.12.20240808 \ - --hash=sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af \ - --hash=sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35 +types-pyyaml==6.0.12.20240917 \ + --hash=sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570 \ + --hash=sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587 # via -r requirements/dev.in typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ diff --git a/requirements/main.txt b/requirements/main.txt index f8cb7df176..10ad6927b2 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,9 +4,9 @@ annotated-types==0.7.0 \ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 # via pydantic -anyio==4.4.0 \ - --hash=sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94 \ - --hash=sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7 +anyio==4.6.0 \ + --hash=sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb \ + --hash=sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a # via # httpcore # starlette @@ -249,9 +249,9 @@ cryptography==43.0.1 \ # phalanx (pyproject.toml) # pyjwt # safir -fastapi==0.114.2 \ - --hash=sha256:0adb148b62edb09e8c6eeefa3ea934e8f276dabc038c5a82989ea6346050c3da \ - --hash=sha256:44474a22913057b1acb973ab90f4b671ba5200482e7622816d79105dcece1ac5 +fastapi==0.115.0 \ + --hash=sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631 \ + --hash=sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -364,103 +364,103 @@ pycparser==2.22 ; 
platform_python_implementation != 'PyPy' \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc # via cffi -pydantic==2.9.1 \ - --hash=sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2 \ - --hash=sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612 +pydantic==2.9.2 \ + --hash=sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f \ + --hash=sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12 # via # phalanx (pyproject.toml) # fastapi # safir -pydantic-core==2.23.3 \ - --hash=sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801 \ - --hash=sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec \ - --hash=sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295 \ - --hash=sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba \ - --hash=sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e \ - --hash=sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e \ - --hash=sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4 \ - --hash=sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211 \ - --hash=sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea \ - --hash=sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c \ - --hash=sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835 \ - --hash=sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d \ - --hash=sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c \ - --hash=sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c \ - --hash=sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61 \ - --hash=sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83 \ - --hash=sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb \ - --hash=sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1 \ - --hash=sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5 \ - --hash=sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690 \ - --hash=sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b \ - --hash=sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7 \ - --hash=sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70 \ - --hash=sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a \ - --hash=sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8 \ - --hash=sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd \ - --hash=sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee \ - --hash=sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1 \ - --hash=sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab \ - --hash=sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958 \ - --hash=sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5 \ - --hash=sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b \ - --hash=sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961 \ - --hash=sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c \ - 
--hash=sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25 \ - --hash=sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4 \ - --hash=sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4 \ - --hash=sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f \ - --hash=sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326 \ - --hash=sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab \ - --hash=sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8 \ - --hash=sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b \ - --hash=sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6 \ - --hash=sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8 \ - --hash=sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01 \ - --hash=sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc \ - --hash=sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d \ - --hash=sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e \ - --hash=sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b \ - --hash=sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855 \ - --hash=sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700 \ - --hash=sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a \ - --hash=sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa \ - --hash=sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541 \ - --hash=sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791 \ - --hash=sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162 \ - --hash=sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611 \ - --hash=sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef \ - --hash=sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe \ - --hash=sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5 \ - --hash=sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba \ - --hash=sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28 \ - --hash=sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa \ - --hash=sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27 \ - --hash=sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4 \ - --hash=sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b \ - --hash=sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2 \ - --hash=sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c \ - --hash=sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8 \ - --hash=sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb \ - --hash=sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c \ - --hash=sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e \ - --hash=sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305 \ - --hash=sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8 \ - --hash=sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4 \ - --hash=sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433 \ - 
--hash=sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45 \ - --hash=sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16 \ - --hash=sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed \ - --hash=sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0 \ - --hash=sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d \ - --hash=sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710 \ - --hash=sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48 \ - --hash=sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423 \ - --hash=sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf \ - --hash=sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9 \ - --hash=sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63 \ - --hash=sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5 \ - --hash=sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb +pydantic-core==2.23.4 \ + --hash=sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36 \ + --hash=sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05 \ + --hash=sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071 \ + --hash=sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327 \ + --hash=sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c \ + --hash=sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36 \ + --hash=sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29 \ + --hash=sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744 \ + --hash=sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d \ + --hash=sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec \ + --hash=sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e \ + --hash=sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e \ + --hash=sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577 \ + --hash=sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232 \ + --hash=sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863 \ + --hash=sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6 \ + --hash=sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368 \ + --hash=sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480 \ + --hash=sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2 \ + --hash=sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2 \ + --hash=sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6 \ + --hash=sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769 \ + --hash=sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d \ + --hash=sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2 \ + --hash=sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84 \ + --hash=sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166 \ + --hash=sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271 \ + --hash=sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5 \ + --hash=sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb \ + 
--hash=sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13 \ + --hash=sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323 \ + --hash=sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556 \ + --hash=sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665 \ + --hash=sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef \ + --hash=sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb \ + --hash=sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119 \ + --hash=sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126 \ + --hash=sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510 \ + --hash=sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b \ + --hash=sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87 \ + --hash=sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f \ + --hash=sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc \ + --hash=sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8 \ + --hash=sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21 \ + --hash=sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f \ + --hash=sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6 \ + --hash=sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658 \ + --hash=sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b \ + --hash=sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3 \ + --hash=sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb \ + --hash=sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59 \ + --hash=sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24 \ + --hash=sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9 \ + --hash=sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3 \ + --hash=sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd \ + --hash=sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753 \ + --hash=sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55 \ + --hash=sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad \ + --hash=sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a \ + --hash=sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605 \ + --hash=sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e \ + --hash=sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b \ + --hash=sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433 \ + --hash=sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8 \ + --hash=sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07 \ + --hash=sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728 \ + --hash=sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0 \ + --hash=sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327 \ + --hash=sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555 \ + --hash=sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64 \ + --hash=sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6 \ + 
--hash=sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea \ + --hash=sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b \ + --hash=sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df \ + --hash=sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e \ + --hash=sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd \ + --hash=sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068 \ + --hash=sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3 \ + --hash=sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040 \ + --hash=sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12 \ + --hash=sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916 \ + --hash=sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f \ + --hash=sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f \ + --hash=sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801 \ + --hash=sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231 \ + --hash=sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5 \ + --hash=sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8 \ + --hash=sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee \ + --hash=sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607 # via # pydantic # safir @@ -558,9 +558,9 @@ sniffio==1.3.1 \ # anyio # httpcore # httpx -starlette==0.38.5 \ - --hash=sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077 \ - --hash=sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206 +starlette==0.38.6 \ + --hash=sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05 \ + --hash=sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead # via # fastapi # safir diff --git a/requirements/tox.txt b/requirements/tox.txt index 0db59a8534..f50f47aa68 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -19,9 +19,9 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.16.0 \ - --hash=sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec \ - --hash=sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609 +filelock==3.16.1 \ + --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ + --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 # via # tox # virtualenv @@ -33,9 +33,9 @@ packaging==24.1 \ # pyproject-api # tox # tox-uv -platformdirs==4.3.3 \ - --hash=sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5 \ - --hash=sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via # -c requirements/dev.txt # tox @@ -46,41 +46,41 @@ pluggy==1.5.0 \ # via # -c requirements/dev.txt # tox -pyproject-api==1.7.1 \ - --hash=sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb \ - --hash=sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827 +pyproject-api==1.8.0 \ + 
--hash=sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228 \ + --hash=sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496 # via tox -tox==4.18.1 \ - --hash=sha256:35d472032ee1f73fe20c3e0e73d7073a4e85075c86ff02c576f9fc7c6a15a578 \ - --hash=sha256:3c0c96bc3a568a5c7e66387a4cfcf8c875b52e09f4d47c9f7a277ec82f1a0b11 +tox==4.20.0 \ + --hash=sha256:21a8005e3d3fe5658a8e36b8ca3ed13a4230429063c5cc2a2fdac6ee5aa0de34 \ + --hash=sha256:5b78a49b6eaaeab3ae4186415e7c97d524f762ae967c63562687c3e5f0ec23d5 # via # -r requirements/tox.in # tox-uv -tox-uv==1.11.3 \ - --hash=sha256:316f559ae5525edec12791d9e1f393e405ded5b7e7d50fbaee4726676951f49a \ - --hash=sha256:d434787406ff2854600c1ceaa555519080026208cf7f65bb5d4b2d7c9c4776de +tox-uv==1.13.0 \ + --hash=sha256:1037e4abad15a3b708b5970ed7a17a0765d7249b641a92b155bc3343b8b0145b \ + --hash=sha256:fb087b8b4ff779c72b48fc72ea1995387bb1c0dfb37910c20e46cef8b5f98c15 # via -r requirements/tox.in -uv==0.4.10 \ - --hash=sha256:0784f75093a75390d8d480cc8a444516e78f08849db9a13c21791a5f651df4a1 \ - --hash=sha256:0f8b9ba4ecfbea343a00e46d509669606e55fe233d800752c4c25650473df358 \ - --hash=sha256:1b6b6c6b8cc0c4e54ab25e3b46e49d1e583e26c194572eb42bfeebf71b39cca2 \ - --hash=sha256:1ff5130b6f3af79c4e47f63db03215aed15e78cb4f1f51682af6f9949c2bcf00 \ - --hash=sha256:2ff29a2f55a697e78d787a41ab41d4b26421d200728289b88b6241d3b486c436 \ - --hash=sha256:30d1f8348a2b18e21a35c97ce42528781f242d0303881fc92fbacdcb653c8bca \ - --hash=sha256:3be73788db9ceacb94a521cf67ca5cc08bac512aef71145b904ab62a3acabdae \ - --hash=sha256:444e1cdb36d7ef103e52185f918800527c255dc369c9f90eb1f198dfa3f4d5bc \ - --hash=sha256:6ba1cc3070e5c63ce0a1421fbed28bd1b3ff520671d7badda11a501504c78394 \ - --hash=sha256:8fa510dfbbde4f8ad5cd2769568c7b0c3e867b74deaf4beabcca79e74e7550cc \ - --hash=sha256:97a1187e11a9df70d55bc577721ad4a19441cda56e4d69fb2f38d88c7650d2a0 \ - --hash=sha256:99954a94dd6c4bff8a9a963c05bc3988214ea39e7511a52fda35112e1a478447 \ - --hash=sha256:a9dc1f8fca5c4a2f73054d9f56c7397e9fc6ba43baefc503d6f0128d72ea662f \ - --hash=sha256:b89dfd213359a23797155ff8175e5202ed6b84aadeb20df92132127608d46acf \ - --hash=sha256:bc87d6c581cfed0979e0f5ee93383d46006c6d4a5e4eb9f43ef13bce61b50cc2 \ - --hash=sha256:bc99e6b45303f0881a8dc199f0b7ea8261dd1779e576e8477a7721ceeeaafcc7 \ - --hash=sha256:e99e3f761875962942e0743b868bd666021d5e14c3df494e820ef8f45fb88578 \ - --hash=sha256:ff9046a8c5e836e892ac7741e672ee016e92e55c659fa8195595df65a1f3accf +uv==0.4.15 \ + --hash=sha256:04858bfd551fabe1635127d9a0afe5c62e1e7d56cf309a9674840c90bfc1f21e \ + --hash=sha256:0e9b78f1a800a4cfdfbdc9ff4e5d4cce34af770f8a1f2b9416b161f294eb3703 \ + --hash=sha256:1401e73f0e8df62b4cfbf394e65a75f18b73bf8a94a6c5653a55bd6fdb8e1bc3 \ + --hash=sha256:1bb79cb06be9bb25a1bf8641bf34593f64a96b3ba66ebd8712954f647d9faa24 \ + --hash=sha256:21a3cedb2276d635543a10a11c61f75c6e387110e23e90cdb6c6dd2e1f3c9453 \ + --hash=sha256:27884429b7fed371fe1fcbe829659c4a259463d0ecacb7891d800e4754b5f24c \ + --hash=sha256:4e40deb2cf2cb403dbaf65209d49c45462ebbb1bff290d4c18b902b5b385cdc9 \ + --hash=sha256:6eef6881abf9b858020ffd23f4e5d77423329da2d4a1bc0af6613c2f698c369a \ + --hash=sha256:7fcf7f3812dd173d39273e99fb2abb0814be6133e7a721baa424cbcfd25b483b \ + --hash=sha256:8d45295757f66d1913e5917c06f1974745adad842403d419362491939be889a6 \ + --hash=sha256:8e36b8e07595fc6216d01e729c81a0b4ff029a93cc2ef987a73d3b650d6d559c \ + --hash=sha256:9822fa4db0d8d50abf5eebe081c01666a98120455090d0b71463d01d5d4153c1 \ + 
--hash=sha256:9e28141883c0aa8525ad5418e519d8791b7dd75f35020d3b1457db89346c5dc8 \ + --hash=sha256:a5920ff4d114025c51d3f925130ca3b0fad277631846b1109347c24948b29159 \ + --hash=sha256:be46b37b569e3c8ffb7d78022bcc0eadeb987109f709c1cec01b00c261ed9595 \ + --hash=sha256:cf7d554656bb8c5b7710300e04d86ab5137ebdd31fe309d66860a9d474b385f8 \ + --hash=sha256:d16ae6b97eb77f478dfe51d6eb3627048d3f47bd04282d3006e6a212e541dba0 \ + --hash=sha256:e32137ba8202b1291e879e8145113bfb543fcc992b5f043852a96d803788b83c # via tox-uv -virtualenv==20.26.4 \ - --hash=sha256:48f2695d9809277003f30776d155615ffc11328e6a0a8c1f0ec80188d7874a55 \ - --hash=sha256:c17f4e0f3e6036e9f26700446f85c76ab11df65ff6d8a9cbfad9f71aabfcf23c +virtualenv==20.26.5 \ + --hash=sha256:4f3ac17b81fba3ce3bd6f4ead2749a72da5929c01774948e243db9ba41df4ff6 \ + --hash=sha256:ce489cac131aa58f4b25e321d6d186171f78e6cb13fafbf32a840cee67733ff4 # via tox From 11e360b23201d3777dac67b0c6302ba397d05600 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 23 Sep 2024 15:12:17 -0700 Subject: [PATCH 130/193] Remove now-unncessary Black configuration We still use Black to reformat docs, but there's no need to maintain the file exclusion list since Python linting is done with Ruff. --- pyproject.toml | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2911638a09..c503a3df13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,22 +53,7 @@ build-backend = "setuptools.build_meta" [tool.black] line-length = 79 -target-version = ["py311"] -exclude = ''' -/( - \.eggs - | \.git - | \.mypy_cache - | \.ruff_cache - | \.tox - | \.venv - | _build - | build - | dist -)/ -''' -# Use single-quoted strings so TOML treats the string like a Python r-string -# Multi-line strings are implicitly treated by black as regular expressions +target-version = ["py312"] [tool.coverage.run] parallel = true From 22601c3bac3bfc3ec139c3131171862db4cee569 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 23 Sep 2024 16:27:52 -0700 Subject: [PATCH 131/193] Remove notebook delegation from portal; adopt newer ghostwriter --- applications/ghostwriter/Chart.yaml | 2 +- applications/portal/templates/ingress.yaml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/applications/ghostwriter/Chart.yaml b/applications/ghostwriter/Chart.yaml index 0ea46c3e3d..8d923876b0 100644 --- a/applications/ghostwriter/Chart.yaml +++ b/applications/ghostwriter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.1.0 +appVersion: 0.1.1 description: URL rewriter/personalizer name: ghostwriter sources: diff --git a/applications/portal/templates/ingress.yaml b/applications/portal/templates/ingress.yaml index 4edef4ab7b..0d7d6fc957 100644 --- a/applications/portal/templates/ingress.yaml +++ b/applications/portal/templates/ingress.yaml @@ -18,7 +18,6 @@ config: - "read:image" - "read:tap" - "write:files" - - "exec:notebook" template: metadata: name: {{ include "portal.fullname" . 
}} From d77314bd51d33e5515692055867308de098c2a7e Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 10:18:21 -0300 Subject: [PATCH 132/193] exposurelog: move site-specific environment variables to Values.env Also add `DAF_BUTLER_REPOSITORY_INDEX`, `S3_ENDPOINT_URL` and `PGPASSFILE` env variables for butler access on the usdf-dev --- applications/exposurelog/README.md | 1 + applications/exposurelog/templates/deployment.yaml | 6 ++++-- applications/exposurelog/values-usdfdev.yaml | 9 +++++++++ applications/exposurelog/values.yaml | 3 +++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index be173d896a..c22286d0e7 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -32,6 +32,7 @@ Log messages related to an exposure | db.host | string | `"postgres.postgres"` | database host | | db.port | int | `5432` | database port | | db.user | string | `"exposurelog"` | database user | +| env | list | `[]` | Environment variables to set in the exposurelog pod | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 714ccb5136..c77bb88f7a 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -77,8 +77,10 @@ spec: value: {{ .Values.db.database | quote }} - name: SITE_ID value: {{ .Values.config.site_id | quote }} - - name: AWS_SHARED_CREDENTIALS_FILE - value: /var/secrets/butler/aws-credentials.ini + {{- range .Values.env }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} volumeMounts: {{- if .Values.config.nfs_path_1 }} - name: volume1 diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml index 5153d2fde7..6c7663c3ad 100644 --- a/applications/exposurelog/values-usdfdev.yaml +++ b/applications/exposurelog/values-usdfdev.yaml @@ -4,3 +4,12 @@ config: db: host: usdf-summitdb.slac.stanford.edu user: usdf +env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/var/secrets/butler/aws-credentials.ini" + - name: DAF_BUTLER_REPOSITORY_INDEX + value: "/project/data-repos.yaml" + - name: S3_ENDPOINT_URL + value: "https://s3dfrgw.slac.stanford.edu" + - name: PGPASSFILE + value: "/var/secrets/butler/postgres-credentials.txt" diff --git a/applications/exposurelog/values.yaml b/applications/exposurelog/values.yaml index 15be2fd7df..426ba95480 100644 --- a/applications/exposurelog/values.yaml +++ b/applications/exposurelog/values.yaml @@ -86,6 +86,9 @@ config: # Sandboxes should use `test`. 
site_id: "" +# -- Environment variables to set in the exposurelog pod +env: [] + # -- Annotations for the exposurelog pod podAnnotations: {} From 1cdcf79654e1b8c7b13ef28127302de54cb2d279 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 10:20:37 -0300 Subject: [PATCH 133/193] exposurelog: remove `PGPASSWORD` as we are setting `PGPASSFILE` Also move `PGUSER` env to site specific --- applications/exposurelog/templates/deployment.yaml | 7 ------- applications/exposurelog/values-usdfdev.yaml | 2 ++ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index c77bb88f7a..6c50301944 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -57,18 +57,11 @@ spec: value: {{ .Values.config.butler_uri_2 | quote }} - name: EXPOSURELOG_DB_USER value: {{ .Values.db.user | quote }} - - name: PGUSER - value: {{ .Values.db.user | quote }} - name: EXPOSURELOG_DB_PASSWORD valueFrom: secretKeyRef: name: exposurelog key: exposurelog_password - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: exposurelog - key: exposurelog_password - name: EXPOSURELOG_DB_HOST value: {{ .Values.db.host | quote }} - name: EXPOSURELOG_DB_PORT diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml index 6c7663c3ad..da7cfbcc8d 100644 --- a/applications/exposurelog/values-usdfdev.yaml +++ b/applications/exposurelog/values-usdfdev.yaml @@ -13,3 +13,5 @@ env: value: "https://s3dfrgw.slac.stanford.edu" - name: PGPASSFILE value: "/var/secrets/butler/postgres-credentials.txt" + - name: PGUSER + value: "rubin" From 34f48e9621a237062f79873feb5f1d56875151d9 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 10:24:45 -0300 Subject: [PATCH 134/193] exposurelog: update butler uri on usdfdev --- applications/exposurelog/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml index da7cfbcc8d..e914a0e17f 100644 --- a/applications/exposurelog/values-usdfdev.yaml +++ b/applications/exposurelog/values-usdfdev.yaml @@ -1,6 +1,6 @@ config: site_id: usdfdev - butler_uri_1: s3://rubin-summit-users/butler.yaml + butler_uri_1: s3://embargo@rubin-summit-users/butler.yaml db: host: usdf-summitdb.slac.stanford.edu user: usdf From 617dcd682a1fc1d950a70007456f5bc94e283a07 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 11:18:25 -0300 Subject: [PATCH 135/193] exposurelog: Add secretEnv to be configured site-specific --- applications/exposurelog/README.md | 1 + applications/exposurelog/templates/deployment.yaml | 7 +++++++ applications/exposurelog/values.yaml | 3 +++ 3 files changed, 11 insertions(+) diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index c22286d0e7..927c35f2f7 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -47,5 +47,6 @@ Log messages related to an exposure | podSecurityContext | object | `{}` | Security context for the exposurelog pod | | replicaCount | int | `1` | How many exposurelog pods to run | | resources | object | `{}` | Resource limits and requests for the exposurelog pod | +| secretEnv | list | `[]` | Additional secret environment variables to set in the exposurelog pod | | securityContext | object | `{}` | Security 
context for the exposurelog deployment | | tolerations | list | `[]` | Tolerations for the exposurelog pod | diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 6c50301944..f738aaa49c 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -74,6 +74,13 @@ spec: - name: {{ .name }} value: {{ .value | quote }} {{- end }} + {{- range .Values.secretEnv }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .secretKey }} + {{- end }} volumeMounts: {{- if .Values.config.nfs_path_1 }} - name: volume1 diff --git a/applications/exposurelog/values.yaml b/applications/exposurelog/values.yaml index 426ba95480..ece7625737 100644 --- a/applications/exposurelog/values.yaml +++ b/applications/exposurelog/values.yaml @@ -89,6 +89,9 @@ config: # -- Environment variables to set in the exposurelog pod env: [] +# -- Additional secret environment variables to set in the exposurelog pod +secretEnv: [] + # -- Annotations for the exposurelog pod podAnnotations: {} From 590794cf07eedc167eed8639cbe9d9a4330317ee Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 11:19:24 -0300 Subject: [PATCH 136/193] exposurelog: add `PGUSER` and `PGPASSWORD` to base, tucson and summit --- applications/exposurelog/values-base.yaml | 12 ++++++++++++ applications/exposurelog/values-summit.yaml | 12 ++++++++++++ .../exposurelog/values-tucson-teststand.yaml | 12 ++++++++++++ 3 files changed, 36 insertions(+) diff --git a/applications/exposurelog/values-base.yaml b/applications/exposurelog/values-base.yaml index c3ff786c6e..3aff3ea83a 100644 --- a/applications/exposurelog/values-base.yaml +++ b/applications/exposurelog/values-base.yaml @@ -6,3 +6,15 @@ config: db: host: postgresdb01.ls.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is due to telescope deployments +# are not using butler access which requires a different user and password. +env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password diff --git a/applications/exposurelog/values-summit.yaml b/applications/exposurelog/values-summit.yaml index 636150ebec..dac0d8412a 100644 --- a/applications/exposurelog/values-summit.yaml +++ b/applications/exposurelog/values-summit.yaml @@ -9,3 +9,15 @@ config: butler_uri_2: /volume_2 db: host: postgresdb01.cp.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is due to telescope deployments +# are not using butler access which requires a different user and password. +env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password diff --git a/applications/exposurelog/values-tucson-teststand.yaml b/applications/exposurelog/values-tucson-teststand.yaml index 94a3159b2f..9a9f75c408 100644 --- a/applications/exposurelog/values-tucson-teststand.yaml +++ b/applications/exposurelog/values-tucson-teststand.yaml @@ -9,3 +9,15 @@ config: butler_uri_2: /volume_2 db: host: postgresdb01.tu.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is due to telescope deployments +# are not using butler access which requires a different user and password. 
+env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password From 5841933f8de95bd51ca6cf5079db438fafeda1b4 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 13:52:45 -0300 Subject: [PATCH 137/193] exposurelog: Update appVersion to 1.3.0 --- applications/exposurelog/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/exposurelog/Chart.yaml b/applications/exposurelog/Chart.yaml index c1a84f7c27..5b095cfd61 100644 --- a/applications/exposurelog/Chart.yaml +++ b/applications/exposurelog/Chart.yaml @@ -12,4 +12,4 @@ version: 1.0.0 # number should be incremented each time you make changes to the # application. Versions are not expected to follow Semantic Versioning. They # should reflect the version the application is using. -appVersion: 1.2.1 +appVersion: 1.3.0 From e974ae28d6a7e630403d7dcd92511b29ed8f2a8e Mon Sep 17 00:00:00 2001 From: dspeck1 Date: Tue, 24 Sep 2024 13:14:42 -0500 Subject: [PATCH 138/193] Add additional test topic for testing job based processing with keda. --- applications/sasquatch/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 833f333ba7..a08a521d88 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -157,6 +157,7 @@ rest-proxy: kafka: topics: - test.next-visit + - test.next-visit-job topicPrefixes: - test - lsst.dm From d1e260100e1736ea3faa7320b9178ceb34e4d1f2 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 24 Sep 2024 11:36:07 -0700 Subject: [PATCH 139/193] Remove acks envvar. --- charts/csc_shared/templates/configmap-env.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/charts/csc_shared/templates/configmap-env.yaml b/charts/csc_shared/templates/configmap-env.yaml index 8e7f7485f8..cefb956f2c 100644 --- a/charts/csc_shared/templates/configmap-env.yaml +++ b/charts/csc_shared/templates/configmap-env.yaml @@ -10,6 +10,5 @@ data: LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystem.kafkaTopicReplicationFactor | quote }} LSST_KAFKA_SECURITY_USERNAME: ts-salkafka LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystem.schemaRegistryUrl }} - LSST_KAFKA_PRODUCER_WAIT_ACKS: "1" S3_ENDPOINT_URL: {{ $.Values.global.controlSystem.s3EndpointUrl }} {{- end }} From a96f9f9277cd12b580e6b5a13f33f6a85ed87c1c Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 16:05:51 -0300 Subject: [PATCH 140/193] nightreport: add application to usdfdev --- applications/nightreport/values-usdfdev.yaml | 9 +++++++++ environments/values-usdfdev.yaml | 1 + 2 files changed, 10 insertions(+) create mode 100644 applications/nightreport/values-usdfdev.yaml diff --git a/applications/nightreport/values-usdfdev.yaml b/applications/nightreport/values-usdfdev.yaml new file mode 100644 index 0000000000..1fab965ee8 --- /dev/null +++ b/applications/nightreport/values-usdfdev.yaml @@ -0,0 +1,9 @@ +image: + repository: ts-dockerhub.lsst.org/nightreport + tag: c0039 + pullPolicy: Always +config: + site_id: usdfdev +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index af80333545..9eb8c8c0a7 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -20,6 +20,7 @@ applications: livetap: true mobu: true narrativelog: true + nightreport: true 
noteburst: true nublado: true obsloctap: true From 6d9ddda9f178d00be5f4302fdde133b485ddaae7 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 24 Sep 2024 17:55:30 -0300 Subject: [PATCH 141/193] nightreport: update nightreport secrets --- .../nightreport/{secrets.yaml => secrets-usdfdev.yaml} | 3 +++ 1 file changed, 3 insertions(+) rename applications/nightreport/{secrets.yaml => secrets-usdfdev.yaml} (53%) diff --git a/applications/nightreport/secrets.yaml b/applications/nightreport/secrets-usdfdev.yaml similarity index 53% rename from applications/nightreport/secrets.yaml rename to applications/nightreport/secrets-usdfdev.yaml index 7a1e9e4a72..a748a56695 100644 --- a/applications/nightreport/secrets.yaml +++ b/applications/nightreport/secrets-usdfdev.yaml @@ -1,2 +1,5 @@ nightreport_password: description: "Password for the nightreport database." + copy: + application: exposurelog + key: exposurelog_password From e5d123ae84aadb290acaf9da129787b06b9bdaad Mon Sep 17 00:00:00 2001 From: Stelios Voutsinas Date: Tue, 24 Sep 2024 15:44:17 -0700 Subject: [PATCH 142/193] Turn off siav2 app on all IDFs --- environments/values-idfdev.yaml | 2 +- environments/values-idfint.yaml | 2 +- environments/values-idfprod.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 39c028701b..6283dddfbd 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -25,7 +25,7 @@ applications: portal: true sasquatch: true semaphore: true - siav2: true + siav2: false ssotap: true squareone: true sqlproxy-cross-project: true diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 15190999da..34696fc711 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -23,7 +23,7 @@ applications: plot-navigator: true portal: true sasquatch: true - siav2: true + siav2: false ssotap: true production-tools: true sasquatch-backpack: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index af3b77877c..0a6a26cc37 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -23,7 +23,7 @@ applications: nublado: true portal: true semaphore: true - siav2: true + siav2: false squareone: true ssotap: true tap: true From 4a0b35b75082e42a4b325c8bf1e65fb2bcb24bfc Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:45:03 -0700 Subject: [PATCH 143/193] usdf-cm-dev: enable gf and postgres (pav) --- .../gafaelfawr/values-usdf-cm-dev.yaml | 223 ++++++++++++++++++ applications/postgres/values-usdf-cm-dev.yaml | 5 + environments/values-usdf-cm-dev.yaml | 3 +- 3 files changed, 230 insertions(+), 1 deletion(-) create mode 100644 applications/gafaelfawr/values-usdf-cm-dev.yaml create mode 100644 applications/postgres/values-usdf-cm-dev.yaml diff --git a/applications/gafaelfawr/values-usdf-cm-dev.yaml b/applications/gafaelfawr/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..18f741fc04 --- /dev/null +++ b/applications/gafaelfawr/values-usdf-cm-dev.yaml @@ -0,0 +1,223 @@ +replicaCount: 2 + +# Use the CSI storage class so that we can use snapshots. 
+redis: + persistence: + storageClass: "wekafs--sdf-k8s01" + +config: + internalDatabase: true + + oidcServer: + enabled: true + + oidc: + clientId: vcluster--usdf-cm-dev + audience: "vcluster--usdf-cm-dev" + loginUrl: "https://dex.slac.stanford.edu/auth" + tokenUrl: "https://dex.slac.stanford.edu/token" + issuer: "https://dex.slac.stanford.edu" + scopes: + - "openid" + - "email" + - "groups" + - "profile" + usernameClaim: "name" + + ldap: + url: ldaps://ldap-unix.slac.stanford.edu:636 + groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu + groupObjectClass: posixGroup + groupMemberAttr: memberUid + groupSearchByDn: false + userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu + userSearchAttr: uid + addUserGroup: false + uidAttr: uidNumber + gidAttr: gidNumber + nameAttr: gecos + + groupMapping: + "admin:token": + - "rubinmgr" + - "unix-admin" + "exec:admin": + - "rubinmgr" + - "unix-admin" + "exec:internal-tools": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:notebook": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:portal": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "read:tap": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "read:image": + - "lsst" + - "lsst-ccs" + - 
"rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "write:sasquatch": + - "rubinmgr" + - "unix-admin" + + initialAdmins: + - "afausti" + - "athor" + - "frossie" + - "jonathansick" + - "rra" + - "ytl" + - "ppascual" diff --git a/applications/postgres/values-usdf-cm-dev.yaml b/applications/postgres/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..79960946d4 --- /dev/null +++ b/applications/postgres/values-usdf-cm-dev.yaml @@ -0,0 +1,5 @@ +gafaelfawr_db: + user: 'gafaelfawr' + db: 'gafaelfawr' + +postgresStorageClass: 'wekafs--sdf-k8s01' diff --git a/environments/values-usdf-cm-dev.yaml b/environments/values-usdf-cm-dev.yaml index 7ca7a8afd7..79573c97b2 100644 --- a/environments/values-usdf-cm-dev.yaml +++ b/environments/values-usdf-cm-dev.yaml @@ -8,7 +8,8 @@ applications: # This environment uses an ingress managed in a separate Kubernetes cluster, # despite that configuration not being officially supported by Phalanx. cert-manager: false - gafaelfawr: false + gafaelfawr: true ingress-nginx: false cm-service: true + postgres: true From d5dd8ca08358a044b6c06a3fc9b4e0d3da2b4c38 Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Mon, 23 Sep 2024 14:52:02 -0700 Subject: [PATCH 144/193] cm-service: update to 0.1.2 --- applications/cm-service/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cm-service/Chart.yaml b/applications/cm-service/Chart.yaml index 9ea2b7b9f1..f6174a96f1 100644 --- a/applications/cm-service/Chart.yaml +++ b/applications/cm-service/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.1.1 +appVersion: 0.1.2 description: Campaign Management for Rubin Data Release Production name: cm-service sources: From 0bc9516b7f6c3cfc7cbbf6747b0d98757f0ab888 Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Mon, 23 Sep 2024 16:59:12 -0700 Subject: [PATCH 145/193] cm-service: add back gf ingress --- .../cm-service/templates/ingress.yaml | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 applications/cm-service/templates/ingress.yaml diff --git a/applications/cm-service/templates/ingress.yaml b/applications/cm-service/templates/ingress.yaml new file mode 100644 index 0000000000..882de320dc --- /dev/null +++ b/applications/cm-service/templates/ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "cm-service" + labels: + {{- include "cm-service.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + loginRedirect: true + scopes: + all: + - "exec:internal-tools" +template: + metadata: + name: "cm-service" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.config.pathPrefix | quote }} + pathType: "Prefix" + backend: + service: + name: "cm-service" + port: + number: 8080 + - path: "/web_app" + pathType: "Prefix" + backend: + service: + name: "cm-service" + port: + number: 8080 From bebfdf10f538b451d2a2ff691db0c962d396e995 Mon Sep 17 00:00:00 2001 From: Jeremy McCormick Date: Wed, 25 Sep 2024 13:39:05 -0500 Subject: [PATCH 146/193] Update sdm_schemas to v3.2.1 --- charts/cadc-tap/README.md | 4 ++-- charts/cadc-tap/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index f0ab35a1aa..1da63ab1f8 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -22,7 +22,7 @@ IVOA TAP service | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `cadc-tap` Kubernetes service accounts and has the `cloudsql.client` role, access | | config.backend | string | None, must be set to `pg` or `qserv` | What type of backend are we connecting to? | -| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/3.0.2/datalink-snippets.zip"` | Datalink payload URL | +| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip"` | Datalink payload URL | | config.gcsBucket | string | `"async-results.lsst.codes"` | Name of GCS bucket in which to store results | | config.gcsBucketType | string | `"GCS"` | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | `"https://tap-files.lsst.codes"` | Base URL for results stored in GCS bucket | @@ -69,7 +69,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"3.0.2"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"v3.2.1"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | See `values.yaml` | Resource limits and requests for the TAP schema database pod | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index d61fdf37af..fd8b7e20ce 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -99,7 +99,7 @@ config: tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL - datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/3.0.2/datalink-snippets.zip" + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip" # -- Name of GCS bucket in which to store results gcsBucket: "async-results.lsst.codes" @@ -162,7 +162,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "3.0.2" + tag: "v3.2.1" # -- Resource limits and requests for the TAP schema database pod # @default -- See `values.yaml` From 5cccf3058b467147c7eafc9bf6cea2d02c4a8cef Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Wed, 25 Sep 2024 14:05:00 -0700 Subject: [PATCH 147/193] Shutdown Prompt Processing LSSTComCamSim prod We want to keep LSSTComCamSim-dev alive for now for testing use. --- .../values-usdfprod-prompt-processing.yaml | 58 ------------------- .../values-usdfprod-prompt-processing.yaml | 2 +- 2 files changed, 1 insertion(+), 59 deletions(-) delete mode 100644 applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml deleted file mode 100644 index 2195736e75..0000000000 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml +++ /dev/null @@ -1,58 +0,0 @@ -prompt-proto-service: - - podAnnotations: - # Expect to need roughly n_detector × request_latency / survey_cadence pods - # For a 30 s ComCam survey with 500 s latency, this is 150 - autoscaling.knative.dev/max-scale: "150" - autoscaling.knative.dev/target-utilization-percentage: "100" - # Update this field if using latest or static image tag in dev - revision: "1" - - worker: - # Embargo rack allows fast cleanup. - grace_period: 20 - - image: - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. 
- tag: 4.2.0 - - instrument: - pipelines: - main: >- - (survey="BLOCK-297")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/ApPipe.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] - (survey="")=[] - preprocessing: >- - (survey="BLOCK-297")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml] - (survey="")=[] - calibRepo: s3://rubin-summit-users - - s3: - imageBucket: rubin-summit - endpointUrl: https://sdfembs3.sdf.slac.stanford.edu - - raw_microservice: http://172.24.5.158:8080/presence - - imageNotifications: - kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 - topic: rubin-summit-notification - - apdb: - config: s3://rubin-summit-users/apdb_config/cassandra/pp_apdb_lsstcomcamsim_or4.py - - alerts: - topic: alerts-simulated - - sasquatch: - endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy - namespace: lsst.prompt.prod - auth_env: false - - logLevel: timer.lsst.activator=DEBUG lsst.diaPipe=VERBOSE lsst.rbClassify=VERBOSE - - knative: - memoryLimit: "16Gi" - - fullnameOverride: "prompt-proto-service-lsstcomcamsim" diff --git a/environments/values-usdfprod-prompt-processing.yaml b/environments/values-usdfprod-prompt-processing.yaml index 7ec0cb921e..b1c1ce92d9 100644 --- a/environments/values-usdfprod-prompt-processing.yaml +++ b/environments/values-usdfprod-prompt-processing.yaml @@ -12,5 +12,5 @@ applications: prompt-proto-service-latiss: true prompt-proto-service-lsstcam: false prompt-proto-service-lsstcomcam: false - prompt-proto-service-lsstcomcamsim: true + prompt-proto-service-lsstcomcamsim: false vault-secrets-operator: false From b49ad6905b8b55cb5de2a00b0cb3e9bdc2526b0e Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 25 Sep 2024 15:07:35 -0700 Subject: [PATCH 148/193] Remove .pgpass support from Prompt Processing. All credentials are now handled through db-auth.yaml, so there's no need to set up a .pgpass file at pod startup. 
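For reference, a minimal sketch of the file shape the `LSST_DB_AUTH` variable points at (assuming the standard daf_butler DbAuth list-of-entries format; the hosts, database names, and credentials below are placeholders, not the real values — the actual file comes from the service secret and is copied to /app/lsst-credentials/db-auth.yaml by the init container):

```yaml
# Hypothetical db-auth.yaml entries, illustrative only.
# Each entry maps a connection URL prefix to the credentials to use for it.
- url: "postgresql://example-apdb.slac.stanford.edu:5432/apdb"
  username: "pp-worker"
  password: "REDACTED"
- url: "postgresql://example-butler.slac.stanford.edu:5432/butlerdb"
  username: "pp-worker"
  password: "REDACTED"
```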
--- .../templates/prompt-proto-service.yaml | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index 841be47dcb..f08eb4b17e 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -13,17 +13,6 @@ spec: spec: containerConcurrency: {{ .Values.containerConcurrency }} initContainers: - - name: init-pgpass - # Make a copy of the read-only secret that's owned by lsst - # lsst account is created by main image with id 1000 - image: busybox - command: ["sh", "-c", "cp -L /app/pg-mount/.pgpass /app/pgsql/ && chown 1000:1000 /app/pgsql/.pgpass && chmod u=r,go-rwx /app/pgsql/.pgpass"] - volumeMounts: - - mountPath: /app/pg-mount - name: pgpass-mount - readOnly: true - - mountPath: /app/pgsql - name: pgpass-credentials-file - name: init-db-auth # Make a copy of the read-only secret that's owned by lsst # lsst account is created by main image with id 1000 @@ -103,8 +92,6 @@ spec: - name: AWS_SHARED_CREDENTIALS_FILE value: /app/s3/credentials {{- end }} - - name: PGPASSFILE - value: /app/pgsql/.pgpass - name: LSST_DB_AUTH value: /app/lsst-credentials/db-auth.yaml - name: AP_KAFKA_PRODUCER_PASSWORD @@ -133,9 +120,6 @@ spec: volumeMounts: - mountPath: /tmp-butler name: ephemeral - - mountPath: /app/pgsql - name: pgpass-credentials-file - readOnly: true - mountPath: /app/lsst-credentials name: db-auth-credentials-file readOnly: true @@ -166,17 +150,6 @@ spec: - name: ephemeral emptyDir: sizeLimit: {{ .Values.knative.ephemeralStorageLimit }} - - name: pgpass-mount - # Temporary mount for .pgpass; cannot be read directly because it's owned by root - secret: - secretName: {{ template "prompt-proto-service.fullname" . }}-secret - items: - - key: pgpass_file - path: .pgpass - defaultMode: 0400 # Minimal permissions, as extra protection - - name: pgpass-credentials-file - emptyDir: - sizeLimit: 10Ki # Just a text file! 
- name: db-auth-mount # Temporary mount for db-auth.yaml; cannot be read directly because it's owned by root secret: From cf9882509e84197554b598b89c0969367affe297 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 24 Sep 2024 13:15:58 -0700 Subject: [PATCH 149/193] Deploy the Telegraf-based connectors at TTS --- .../sasquatch/values-tucson-teststand.yaml | 126 +++++++----------- 1 file changed, 50 insertions(+), 76 deletions(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 002c0d1bca..1c7732ecef 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -76,122 +76,96 @@ influxdb: hostname: tucson-teststand.lsst.codes telegraf-kafka-consumer: - enabled: false + enabled: true + image: + repo: "docker.io/lsstsqre/telegraf" + tag: "avro-mutex" kafkaConsumers: auxtel: enabled: true + database: "efd" topicRegexps: | - [ ".*ATAOS", ".*ATDome", ".*ATDomeTrajectory", ".*ATHexapod", ".*ATPneumatics", ".*ATPtg", ".*ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true maintel: enabled: true + database: "efd" topicRegexps: | - [ ".*MTAOS", ".*MTDome", ".*MTDomeTrajectory", ".*MTPtg" ] + [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true mtmount: enabled: true + database: "efd" topicRegexps: | - [ ".*MTMount" ] - comcam: - enabled: true - topicRegexps: | - [ ".*CCCamera", ".*CCHeaderService", ".*CCOODS" ] + [ "lsst.sal.MTMount" ] + debug: true eas: enabled: true + database: "efd" topicRegexps: | - [ ".*DIMM", ".*DSM", ".*EPM", ".*ESS", ".*WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true latiss: enabled: true + database: "efd" topicRegexps: | - [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] + [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true m1m3: enabled: true - flush_interval: "1s" - metric_batch_size: 5000 - interval: "0.1s" + database: "efd" topicRegexps: | - [ ".*MTM1M3" ] + [ "lsst.sal.MTM1M3" ] + metric_batch_size: 2500 + debug: true m2: enabled: true + database: "efd" topicRegexps: | - [ ".*MTHexapod", ".*MTM2", ".*MTRotator" ] + [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true obssys: enabled: true + database: "efd" topicRegexps: | - [ ".*Scheduler", ".*Script", ".*ScriptQueue", ".*Watcher" ] + [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true ocps: enabled: true + database: "efd" topicRegexps: | - [ ".*OCPS" ] - calsys: + [ "lsst.sal.OCPS" ] + debug: true + test: enabled: true + database: "efd" topicRegexps: | - [ ".*ATMonochromator", ".*ATWhiteLight", ".*CBP", ".*Electrometer", ".*FiberSpectrograph", ".*LEDProjector", ".*LinearStage", ".*MTReflector", ".*TunableLaser" ] + [ "lsst.sal.Test" ] + debug: true mtaircompressor: enabled: true + database: "efd" topicRegexps: | - [ ".*MTAirCompressor" ] + [ "lsst.sal.MTAirCompressor" ] + debug: true lasertracker: enabled: true + database: "efd" topicRegexps: | - [ ".*LaserTracker" ] - test: + [ "lsst.sal.LaserTracker" ] + debug: true + genericcamera: enabled: true + database: "efd" topicRegexps: | - [ "lsst.sal.Test" ] - genericcamera: + [ "lsst.sal.GCHeaderService", 
"lsst.sal.GenericCamera" ] + debug: true + lsstcam: enabled: true + database: "efd" topicRegexps: | - [ ".*GCHeaderService", ".*GenericCamera" ] - -kafka-connect-manager: - influxdbSink: - # Based on the kafka producers configuration for the TTS - # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-tucson-teststand.yaml - connectors: - auxtel: - enabled: true - topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" - maintel: - enabled: true - topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" - mtmount: - enabled: true - topicsRegex: ".*MTMount" - comcam: - enabled: true - topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" - eas: - enabled: true - topicsRegex: ".*DIMM|.*DSM|.*EPM|.*ESS|.*WeatherForecast" - latiss: - enabled: true - topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" - m1m3: - enabled: true - topicsRegex: ".*MTM1M3" - m2: - enabled: true - topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" - obssys: - enabled: true - topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" - ocps: - enabled: true - topicsRegex: ".*OCPS" - test: - enabled: true - topicsRegex: "lsst.sal.Test" - calsys: - enabled: true - topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LEDProjector|.*LinearStage|.*MTReflector|.*TunableLaser" - mtaircompressor: - enabled: true - topicsRegex: ".*MTAirCompressor" - lasertracker: - enabled: true - topicsRegex: ".*LaserTracker" - genericcamera: - enabled: true - topicsRegex: ".*GCHeaderService|.*GenericCamera" + [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" From b6e1aa2e7c4f883f84cb0531b1dba3871ec1ebca Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 24 Sep 2024 13:59:08 -0700 Subject: [PATCH 150/193] Remove LSSTCam --- applications/sasquatch/values-tucson-teststand.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 1c7732ecef..98e315ef21 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -160,12 +160,6 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] debug: true - lsstcam: - enabled: true - database: "efd" - topicRegexps: | - [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] - debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" From d4a0494550b08a6f325b0637a562a212fbd1dbc7 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 24 Sep 2024 13:59:51 -0700 Subject: [PATCH 151/193] Add ComCam --- applications/sasquatch/values-tucson-teststand.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 98e315ef21..aab1e77686 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -136,6 +136,12 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.OCPS" ] debug: true + comcam: + enabled: true + database: "efd" + topicRegexps: | + [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] + debug: true test: enabled: true 
database: "efd" From 8d2006504a2b458027a7d3f3c0ae2e7c612b298a Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 24 Sep 2024 14:00:32 -0700 Subject: [PATCH 152/193] Add the calibration systems back - RemoveATMonochromator and ATWhiteLight --- applications/sasquatch/values-tucson-teststand.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index aab1e77686..2e148cf30b 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -136,6 +136,12 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.OCPS" ] debug: true + calsys: + enabled: true + database: "efd" + topicRegexps: | + [ "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LEDProjector", "lsst.sal.LinearStage", "lsst.sal.MTReflector", "lsst.sal.TunableLaser" ] + debug: true comcam: enabled: true database: "efd" From 02459083ced1a9fcd9acc09cdc5ea6dc2418de62 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 24 Sep 2024 12:50:48 -0700 Subject: [PATCH 153/193] Add lsst.obsenv namespace for telescope environments --- applications/sasquatch/values-base.yaml | 9 +++++++++ applications/sasquatch/values-summit.yaml | 10 ++++++++++ applications/sasquatch/values-tucson-teststand.yaml | 9 +++++++++ 3 files changed, 28 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index a28a51be3b..8bd0138b2b 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -222,6 +222,14 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] debug: true + obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" @@ -242,6 +250,7 @@ rest-proxy: topicPrefixes: - test - lsst.dm + - lsst.obsenv chronograf: persistence: diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 30b1873a25..fd4905696c 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -276,6 +276,14 @@ telegraf-kafka-consumer-oss: [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] topicRegexps: | [ "lsst.MTCamera" ] + oss-obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true telegraf-kafka-consumer: enabled: true @@ -450,6 +458,7 @@ telegraf-kafka-consumer: [ "lsst.MTCamera" ] debug: true + kafdrop: ingress: enabled: true @@ -466,6 +475,7 @@ rest-proxy: topicPrefixes: - lsst.dm - lsst.backpack + - lsst.obsenv - lsst.ATCamera - lsst.CCCamera - lsst.MTCamera diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 2e148cf30b..1df0bcd307 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -172,6 +172,14 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.GCHeaderService", 
"lsst.sal.GenericCamera" ] debug: true + obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" @@ -191,6 +199,7 @@ rest-proxy: - test.next-visit topicPrefixes: - test + - lsst.obsenv - lsst.dm chronograf: From 8a0eb43692c0a8b8c5fe37dfbd618136dc7774cf Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 25 Sep 2024 16:28:15 -0700 Subject: [PATCH 154/193] Replicate lsst.obsenv topics to USDF --- applications/sasquatch/values-usdfprod.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 4dfe10f35f..bd0516b5a7 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -22,7 +22,7 @@ strimzi-kafka: enabled: true source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 - topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*" + topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*, lsst.obsenv.*" resources: requests: cpu: 2 @@ -312,6 +312,14 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.MTCamera" ] debug: true + obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true kafdrop: ingress: From cd153ba43545ec442355d6c89dc6cc2c4b487511 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 25 Sep 2024 17:31:28 -0700 Subject: [PATCH 155/193] Add lsst.cp namespace for summit environment - This namespace is used by the Calibration Pipeline --- applications/sasquatch/values-summit.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index fd4905696c..7a6158cfef 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -284,6 +284,16 @@ telegraf-kafka-consumer-oss: topicRegexps: | [ "lsst.obsenv" ] debug: true + oss-cp: + enabled: true + database: "lsst.cp" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.cp" ] + tags: | + [ "dataset_tag", "band", "instrument", "skymap", "detector", "physical_filter", "tract", "exposure", "patch", "visit", "run", "pipeline" ] + debug: true telegraf-kafka-consumer: enabled: true @@ -476,6 +486,7 @@ rest-proxy: - lsst.dm - lsst.backpack - lsst.obsenv + - lsst.cp - lsst.ATCamera - lsst.CCCamera - lsst.MTCamera From 3d7cffb40a262083abd30bd9a16f27e405938502 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 25 Sep 2024 17:36:36 -0700 Subject: [PATCH 156/193] Enable replication of lsst.cp topics to USDF --- applications/sasquatch/values-usdfprod.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index bd0516b5a7..d3adca0d93 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -22,7 +22,7 @@ strimzi-kafka: enabled: true source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 - topicsPattern: "registry-schemas, lsst.sal.*, 
lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*, lsst.obsenv.*" + topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*, lsst.obsenv.*, lsst.cp.*" resources: requests: cpu: 2 @@ -320,6 +320,16 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.obsenv" ] debug: true + cp: + enabled: true + database: "lsst.cp" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.cp" ] + tags: | + [ "dataset_tag", "band", "instrument", "skymap", "detector", "physical_filter", "tract", "exposure", "patch", "visit", "run", "pipeline" ] + debug: true kafdrop: ingress: From d851003230502b870ca9bcae1c9c81d0a80f1644 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Date: Thu, 26 Sep 2024 12:53:17 -0300 Subject: [PATCH 157/193] rubintv: update app version for summit and usdf production deployments --- applications/rubintv/values-summit.yaml | 2 +- applications/rubintv/values-usdfprod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/rubintv/values-summit.yaml b/applications/rubintv/values-summit.yaml index c1f2cb88ef..07a3594fb2 100644 --- a/applications/rubintv/values-summit.yaml +++ b/applications/rubintv/values-summit.yaml @@ -20,7 +20,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.3.0 + tag: v2.3.1 pullPolicy: Always workers: diff --git a/applications/rubintv/values-usdfprod.yaml b/applications/rubintv/values-usdfprod.yaml index 7349f935f4..9818e96584 100644 --- a/applications/rubintv/values-usdfprod.yaml +++ b/applications/rubintv/values-usdfprod.yaml @@ -16,7 +16,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.3.0 + tag: v2.3.1 pullPolicy: Always workers: From a694855d5f014ec1e15d040ae1153104efebcee6 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Thu, 26 Sep 2024 10:27:18 -0700 Subject: [PATCH 158/193] Send production-run alerts to a separate alert topic Both development testing and production prompt processing runs can generate alerts. Currently all LATISS alerts are sent to the "alert-stream-test" topic. This removes "alert-stream-test" as the default topic for all deployments and changes where the LATISS production PP runs send their alerts to. 
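For reference, a minimal sketch of the per-environment override that is now required, using
the topic names set elsewhere in this change (other deployments would substitute their own
topic):

```yaml
prompt-proto-service:
  alerts:
    # No chart-level default any more; each environment must set this explicitly.
    topic: "alert-stream-test"   # development testing deployments
```

The LATISS production deployment instead sets `topic: "latiss-alerts"`, as shown in the diff
below.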
--- applications/prompt-proto-service-hsc-gpu/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 3 +++ applications/prompt-proto-service-hsc-gpu/values.yaml | 3 ++- applications/prompt-proto-service-hsc/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 3 +++ applications/prompt-proto-service-hsc/values.yaml | 3 ++- applications/prompt-proto-service-latiss/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 3 +++ .../values-usdfprod-prompt-processing.yaml | 3 +++ applications/prompt-proto-service-latiss/values.yaml | 3 ++- applications/prompt-proto-service-lsstcam/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 3 +++ applications/prompt-proto-service-lsstcam/values.yaml | 3 ++- applications/prompt-proto-service-lsstcomcam/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 3 +++ applications/prompt-proto-service-lsstcomcam/values.yaml | 3 ++- applications/prompt-proto-service-lsstcomcamsim/README.md | 2 +- applications/prompt-proto-service-lsstcomcamsim/values.yaml | 1 + 18 files changed, 35 insertions(+), 11 deletions(-) diff --git a/applications/prompt-proto-service-hsc-gpu/README.md b/applications/prompt-proto-service-hsc-gpu/README.md index 76ce7c399a..2415159676 100644 --- a/applications/prompt-proto-service-hsc-gpu/README.md +++ b/applications/prompt-proto-service-hsc-gpu/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| diff --git a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml index fe819556fd..7e9e4e559b 100644 --- a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml @@ -27,6 +27,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/sql/pp_apdb_hsc-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-hsc-gpu/values.yaml b/applications/prompt-proto-service-hsc-gpu/values.yaml index c838c1475a..7efc93a3bb 100644 --- a/applications/prompt-proto-service-hsc-gpu/values.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values.yaml @@ -99,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index a463a85160..fbb60fceae 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index f507024096..aba3ca2b2c 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -28,6 +28,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/sql/pp_apdb_hsc-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 1361c25215..c3921fcb42 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -99,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 579207cc66..941c350a20 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 3c34271230..9e0c60bf5d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -28,6 +28,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/cassandra/pp_apdb_latiss-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 77d8ba6207..07426caa9c 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -58,6 +58,9 @@ prompt-proto-service: apdb: config: s3://rubin-summit-users/apdb_config/cassandra/pp_apdb_latiss.py + alerts: + topic: "latiss-alerts" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy namespace: lsst.prompt.prod diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 5b82a11fed..38fddacd35 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -99,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 419a466c0d..b2d000f026 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. 
The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 32e0705ba0..818307f6ca 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -22,4 +22,7 @@ prompt-proto-service: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing + alerts: + topic: "alert-stream-test" + fullnameOverride: "prompt-proto-service-lsstcam" diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index c0d79823c9..a590661413 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -99,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 71a9b5713d..9e9b55654b 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index b6b4ce83dc..45667dadc3 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -22,4 +22,7 @@ prompt-proto-service: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing + alerts: + topic: "alert-stream-test" + fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 83d6a9616b..7682298e07 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -99,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. diff --git a/applications/prompt-proto-service-lsstcomcamsim/README.md b/applications/prompt-proto-service-lsstcomcamsim/README.md index 0bf22395e9..6854bea8e2 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/README.md +++ b/applications/prompt-proto-service-lsstcomcamsim/README.md @@ -15,7 +15,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `""` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. 
| diff --git a/applications/prompt-proto-service-lsstcomcamsim/values.yaml b/applications/prompt-proto-service-lsstcomcamsim/values.yaml index ae5879d20a..99f8eea75b 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values.yaml @@ -99,6 +99,7 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent + # @default -- None, must be set topic: "" registry: From 3a2a96b58b2e304b360fbffb64adf8b321084f49 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 23 Sep 2024 11:49:32 -0400 Subject: [PATCH 159/193] Deploy Squarebot with interaction support This adds support for parsing block_actions interaction events from Slack. See https://github.com/lsst-sqre/squarebot/pull/33 --- applications/squarebot/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/squarebot/Chart.yaml b/applications/squarebot/Chart.yaml index e46b7e53fd..78ee9ca608 100644 --- a/applications/squarebot/Chart.yaml +++ b/applications/squarebot/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: squarebot version: 1.0.0 -appVersion: "0.9.0" +appVersion: "tickets-DM-46427" description: Squarebot feeds events from services like Slack and GitHub into the SQuaRE Events Kafka message bus running on Roundtable. Backend apps like Templatebot and Unfurlbot can subscribe to these events and take domain-specific action. type: application home: https://squarebot.lsst.io/ From 398e88af917cbc30e2033edf99dd50e5250df2ce Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 23 Sep 2024 18:19:32 -0400 Subject: [PATCH 160/193] Add block actions topic config for Squarebot This is the name of the Kafka topic for Slack block actions. 
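As an illustrative sketch, the new values entry looks like the following and is surfaced to
the service through the chart's ConfigMap as `SQUAREBOT_TOPIC_BLOCK_ACTIONS` (key and topic
name as used in the diff below):

```yaml
config:
  topics:
    # Kafka topic for Slack `block_actions` interaction events
    slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions"
```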
--- applications/squarebot/README.md | 2 +- applications/squarebot/templates/configmap.yaml | 2 +- applications/squarebot/values.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md index 2695e862fb..68ea50f85d 100644 --- a/applications/squarebot/README.md +++ b/applications/squarebot/README.md @@ -19,7 +19,7 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event | autoscaling.targetCPUUtilizationPercentage | int | `80` | | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | -| config.topics.slackInteraction | string | `"lsst.square-events.squarebot.slack.interaction"` | Kafka topic for Slack interaction events | +| config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` interaction events | | config.topics.slackMessageChannels | string | `"lsst.square-events.squarebot.slack.message.channels"` | Kafka topic name for the Slack `message.channels` events (public channels) | | config.topics.slackMessageGroups | string | `"lsst.square-events.squarebot.slack.message.groups"` | Kafka topic name for the Slack `message.groups` events (private channels) | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | diff --git a/applications/squarebot/templates/configmap.yaml b/applications/squarebot/templates/configmap.yaml index b6f81143a8..687d54aede 100644 --- a/applications/squarebot/templates/configmap.yaml +++ b/applications/squarebot/templates/configmap.yaml @@ -14,4 +14,4 @@ data: SQUAREBOT_TOPIC_MESSAGE_GROUPS: {{ .Values.config.topics.slackMessageGroups | quote }} SQUAREBOT_TOPIC_MESSAGE_IM: {{ .Values.config.topics.slackMessageIm | quote }} SQUAREBOT_TOPIC_MESSAGE_MPIM: {{ .Values.config.topics.slackMessageMpim | quote }} - SQUAREBOT_TOPIC_INTERACTION: {{ .Values.config.topics.slackInteraction | quote }} + SQUAREBOT_TOPIC_BLOCK_ACTIONS: {{ .Values.config.topics.slackBlockActions | quote }} diff --git a/applications/squarebot/values.yaml b/applications/squarebot/values.yaml index bd00c36a37..276dc85811 100644 --- a/applications/squarebot/values.yaml +++ b/applications/squarebot/values.yaml @@ -107,5 +107,5 @@ config: # -- Kafka topic name for the Slack `message.mpim` events (multi-person direct messages) slackMessageMpim: "lsst.square-events.squarebot.slack.message.mpim" - # -- Kafka topic for Slack interaction events - slackInteraction: "lsst.square-events.squarebot.slack.interaction" + # -- Kafka topic for Slack `block_actions` interaction events + slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" From 2d5c8720f835a998b667396447c973ef98884de9 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 24 Sep 2024 11:09:58 -0400 Subject: [PATCH 161/193] Add block-actions Kafka topic Rename the "interaction" Squarebot Kafka topic to `...interaction.block-actions` since we'll have different topics for each type of interaction. Add topic permissions for squarebot and templatebot. Add configmap configuration for Squarebot ("templatebot's configuration will be in a separate PR). 
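For context, a minimal sketch of the Strimzi ACL shape used to grant a consumer access to the
renamed topic (the actual rules added for squarebot and templatebot are in the diff below):

```yaml
- resource:
    type: topic
    name: "lsst.square-events.squarebot.slack.interaction.block-actions"
    patternType: literal
  type: allow
  host: "*"
  operations:
    - "Read"
    - "Describe"
```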
--- .../charts/square-events/templates/squarebot-topics.yaml | 2 +- .../charts/square-events/templates/squarebot-user.yaml | 2 +- .../charts/square-events/templates/templatebot-user.yaml | 9 +++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml index 1517ea6c55..7896298b70 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml @@ -2,7 +2,7 @@ apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaTopic metadata: - name: "lsst.square-events.squarebot.slack.interaction" + name: "lsst.square-events.squarebot.slack.interaction.block-actions" labels: strimzi.io/cluster: {{ .Values.cluster.name }} spec: diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml index 3b0f8e252a..6353ae2784 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml @@ -64,7 +64,7 @@ spec: - "Describe" - resource: type: topic - name: "lsst.square-events.squarebot.slack.interaction" + name: "lsst.square-events.squarebot.slack.interaction.block-actions" patternType: literal type: allow host: "*" diff --git a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml index fb46b65e2b..0a00275bb2 100644 --- a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml +++ b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml @@ -42,3 +42,12 @@ spec: operations: - "Read" - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.block-actions" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" From 4e838201aef3d03acb719dc06d3be990825251f1 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 24 Sep 2024 14:40:50 -0400 Subject: [PATCH 162/193] Configure the block actions topic for templatebot --- applications/templatebot/README.md | 1 + applications/templatebot/templates/configmap.yaml | 1 + applications/templatebot/values.yaml | 3 +++ 3 files changed, 5 insertions(+) diff --git a/applications/templatebot/README.md b/applications/templatebot/README.md index c743d3c467..b0e3509ea8 100644 --- a/applications/templatebot/README.md +++ b/applications/templatebot/README.md @@ -15,6 +15,7 @@ Create new projects | config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | | config.pathPrefix | string | `"/templatebot"` | URL path prefix | | config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | +| config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` interaction events | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff 
--git a/applications/templatebot/templates/configmap.yaml b/applications/templatebot/templates/configmap.yaml index 81782fd7e0..41f0e97266 100644 --- a/applications/templatebot/templates/configmap.yaml +++ b/applications/templatebot/templates/configmap.yaml @@ -11,3 +11,4 @@ data: TEMPLATEBOT_PROFILE: {{ .Values.config.logProfile | quote }} TEMPLATEBOT_APP_MENTION_TOPIC: {{ .Values.config.topics.slackAppMention | quote }} TEMPLATEBOT_MESSAGE_IM_TOPIC: {{ .Values.config.topics.slackMessageIm | quote }} + TEMPLATEBOT_BLOCK_ACTIONS_TOPIC: {{ .Values.config.topics.slackBlockActions | quote }} diff --git a/applications/templatebot/values.yaml b/applications/templatebot/values.yaml index cf65f9bab7..de5e85995b 100644 --- a/applications/templatebot/values.yaml +++ b/applications/templatebot/values.yaml @@ -34,6 +34,9 @@ config: # -- Kafka topic name for the Slack `message.im` events (direct message channels) slackMessageIm: "lsst.square-events.squarebot.slack.message.im" + # -- Kafka topic for Slack `block_actions` interaction events + slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" + ingress: # -- Additional annotations for the ingress rule annotations: {} From d5a10d898e6e25086dadd2b57a8c869e9f91c4de Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 25 Sep 2024 15:31:51 -0400 Subject: [PATCH 163/193] Add Slack view submission topic for squarebot Squarebot will publish Slack view submission payload messages and templatebot will consume them. --- .../square-events/templates/squarebot-topics.yaml | 13 +++++++++++++ .../square-events/templates/squarebot-user.yaml | 9 +++++++++ .../square-events/templates/templatebot-user.yaml | 9 +++++++++ applications/squarebot/README.md | 1 + applications/squarebot/templates/configmap.yaml | 1 + applications/squarebot/values.yaml | 3 +++ applications/templatebot/README.md | 1 + applications/templatebot/templates/configmap.yaml | 1 + applications/templatebot/values.yaml | 3 +++ 9 files changed, 41 insertions(+) diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml index 7896298b70..25eba2af35 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml @@ -14,6 +14,19 @@ spec: --- apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaTopic +metadata: + name: "lsst.square-events.squarebot.slack.interaction.view-submission" + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + partitions: 4 + replicas: 3 + config: + # http://kafka.apache.org/documentation/#topicconfigs + retention.ms: 1800000 # 30 minutes +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic metadata: name: "lsst.square-events.squarebot.slack.app.mention" labels: diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml index 6353ae2784..1285a4ec6f 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml @@ -71,3 +71,12 @@ spec: operations: - "Write" - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.view-submission" + patternType: literal + type: allow + host: "*" + operations: + - "Write" + - "Describe" diff --git 
a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml index 0a00275bb2..580bfa028f 100644 --- a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml +++ b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml @@ -51,3 +51,12 @@ spec: operations: - "Read" - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.view-submission" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md index 68ea50f85d..8828804def 100644 --- a/applications/squarebot/README.md +++ b/applications/squarebot/README.md @@ -24,6 +24,7 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event | config.topics.slackMessageGroups | string | `"lsst.square-events.squarebot.slack.message.groups"` | Kafka topic name for the Slack `message.groups` events (private channels) | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | | config.topics.slackMessageMpim | string | `"lsst.square-events.squarebot.slack.message.mpim"` | Kafka topic name for the Slack `message.mpim` events (multi-person direct messages) | +| config.topics.slackViewSubmission | string | `"lsst.square-events.squarebot.slack.interaction.view-submission"` | Kafka topic for Slack `view_submission` interaction events | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff --git a/applications/squarebot/templates/configmap.yaml b/applications/squarebot/templates/configmap.yaml index 687d54aede..916c526898 100644 --- a/applications/squarebot/templates/configmap.yaml +++ b/applications/squarebot/templates/configmap.yaml @@ -15,3 +15,4 @@ data: SQUAREBOT_TOPIC_MESSAGE_IM: {{ .Values.config.topics.slackMessageIm | quote }} SQUAREBOT_TOPIC_MESSAGE_MPIM: {{ .Values.config.topics.slackMessageMpim | quote }} SQUAREBOT_TOPIC_BLOCK_ACTIONS: {{ .Values.config.topics.slackBlockActions | quote }} + SQUAREBOT_TOPIC_VIEW_SUBMISSION: {{ .Values.config.topics.slackViewSubmission | quote }} diff --git a/applications/squarebot/values.yaml b/applications/squarebot/values.yaml index 276dc85811..a59e16748f 100644 --- a/applications/squarebot/values.yaml +++ b/applications/squarebot/values.yaml @@ -109,3 +109,6 @@ config: # -- Kafka topic for Slack `block_actions` interaction events slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" + + # -- Kafka topic for Slack `view_submission` interaction events + slackViewSubmission: "lsst.square-events.squarebot.slack.interaction.view-submission" diff --git a/applications/templatebot/README.md b/applications/templatebot/README.md index b0e3509ea8..fa76b28227 100644 --- a/applications/templatebot/README.md +++ b/applications/templatebot/README.md @@ -17,6 +17,7 @@ Create new projects | config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | | config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` 
interaction events | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | +| config.topics.slackViewSubmission | string | `"lsst.square-events.squarebot.slack.interaction.view-submission"` | Kafka topic for Slack `view_submission` interaction events | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/templatebot/templates/configmap.yaml b/applications/templatebot/templates/configmap.yaml index 41f0e97266..343c47e17b 100644 --- a/applications/templatebot/templates/configmap.yaml +++ b/applications/templatebot/templates/configmap.yaml @@ -12,3 +12,4 @@ data: TEMPLATEBOT_APP_MENTION_TOPIC: {{ .Values.config.topics.slackAppMention | quote }} TEMPLATEBOT_MESSAGE_IM_TOPIC: {{ .Values.config.topics.slackMessageIm | quote }} TEMPLATEBOT_BLOCK_ACTIONS_TOPIC: {{ .Values.config.topics.slackBlockActions | quote }} + TEMPLATEBOT_VIEW_SUBMISSION_TOPIC: {{ .Values.config.topics.slackViewSubmission | quote }} diff --git a/applications/templatebot/values.yaml b/applications/templatebot/values.yaml index de5e85995b..227aa85890 100644 --- a/applications/templatebot/values.yaml +++ b/applications/templatebot/values.yaml @@ -37,6 +37,9 @@ config: # -- Kafka topic for Slack `block_actions` interaction events slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" + # -- Kafka topic for Slack `view_submission` interaction events + slackViewSubmission: "lsst.square-events.squarebot.slack.interaction.view-submission" + ingress: # -- Additional annotations for the ingress rule annotations: {} From 4f21b8580771444582b569290c002bfbffbadcdb Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 26 Sep 2024 18:24:18 -0400 Subject: [PATCH 164/193] Update to Squarebot 0.10.0 --- applications/squarebot/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/squarebot/Chart.yaml b/applications/squarebot/Chart.yaml index 78ee9ca608..46f43eabff 100644 --- a/applications/squarebot/Chart.yaml +++ b/applications/squarebot/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: squarebot version: 1.0.0 -appVersion: "tickets-DM-46427" +appVersion: "0.10.0" description: Squarebot feeds events from services like Slack and GitHub into the SQuaRE Events Kafka message bus running on Roundtable. Backend apps like Templatebot and Unfurlbot can subscribe to these events and take domain-specific action. 
type: application home: https://squarebot.lsst.io/ From 195a44002c48a65130f501d917ea262c7a570413 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Fri, 27 Sep 2024 13:08:06 -0300 Subject: [PATCH 165/193] nightreport: fix summit site_id --- applications/nightreport/values-summit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nightreport/values-summit.yaml b/applications/nightreport/values-summit.yaml index 4d12a865e7..9fa3095228 100644 --- a/applications/nightreport/values-summit.yaml +++ b/applications/nightreport/values-summit.yaml @@ -3,7 +3,7 @@ image: tag: c0036 pullPolicy: Always config: - site_id: base + site_id: summit db: host: postgresdb01.cp.lsst.org global: From 0bb33e375b3406a37a5a64402a5e84a0fc39cd43 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 27 Sep 2024 09:28:14 -0700 Subject: [PATCH 166/193] Deploy Prompt Processing 4.5.1 for LATISS. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 07426caa9c..a187cd1136 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: 4.5.0 + tag: 4.5.1 instrument: pipelines: From a526dbea4e2a735f172deb71941ba8e6c5d9b29f Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 27 Sep 2024 09:28:48 -0700 Subject: [PATCH 167/193] Document block IDs for LATISS Prompt Processing. LATISS has now completely moved away from human-readable block names, so the only way to identify a block is to look it up on Jira. 
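As a sketch of the resulting documentation style (the block descriptions are the ones added in
this change; the pipeline lists are elided):

```yaml
instrument:
  pipelines:
    # BLOCK-306 is photographic imaging
    # BLOCK-T17 is daytime checkout
    # BLOCK-271 is photon transfer curve calibrations
    # BLOCK-295 is the daily calibration sequence as of May 27, 2024
    main: >-
      (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ...]
```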
--- .../values-usdfprod-prompt-processing.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index a187cd1136..ec4b9cc3f7 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -18,6 +18,9 @@ prompt-proto-service: instrument: pipelines: + # BLOCK-306 is photographic imaging + # BLOCK-T17 is daytime checkout + # BLOCK-271 is photon transfer curve calibrations # BLOCK-295 is the daily calibration sequence as of May 27, 2024 main: >- (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, From 4152d49d7ce5d36d747f1bb520e1c99b8a1030d9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 10:12:59 +0000 Subject: [PATCH 168/193] chore(deps): update confluentinc/cp-kafka-rest docker tag to v7.7.1 --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- applications/sasquatch/charts/rest-proxy/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index daaf651fd7..aac4c033b9 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -313,7 +313,7 @@ Rubin Observatory's telemetry service | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository | -| rest-proxy.image.tag | string | `"7.7.0"` | Kafka REST proxy image tag | +| rest-proxy.image.tag | string | `"7.7.1"` | Kafka REST proxy image tag | | rest-proxy.ingress.annotations | object | See `values.yaml` | Additional annotations to add to the ingress | | rest-proxy.ingress.enabled | bool | `false` | Whether to enable the ingress | | rest-proxy.ingress.hostname | string | None, must be set if ingress is enabled | Ingress hostname | diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index 2daa2e6d24..eea798d3ae 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. 
| heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository | -| image.tag | string | `"7.7.0"` | Kafka REST proxy image tag | +| image.tag | string | `"7.7.1"` | Kafka REST proxy image tag | | ingress.annotations | object | See `values.yaml` | Additional annotations to add to the ingress | | ingress.enabled | bool | `false` | Whether to enable the ingress | | ingress.hostname | string | None, must be set if ingress is enabled | Ingress hostname | diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index e396a6e9bf..ef0cd8cbac 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -11,7 +11,7 @@ image: pullPolicy: IfNotPresent # -- Kafka REST proxy image tag - tag: 7.7.0 + tag: 7.7.1 service: # -- Kafka REST proxy service port From 0ed022a321bb8c73cec979d89884942f4d60f8c1 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 27 Sep 2024 12:05:24 -0700 Subject: [PATCH 169/193] Change metric_batch_size for USDF M1M3 telegraf connector. --- applications/sasquatch/values-usdfprod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index d3adca0d93..f210ecb710 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -182,6 +182,7 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] + metric_batch_size: 2500 debug: true m2: enabled: true From 562ebc4f66dfee40e885a41d11752b7e9e75ea24 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 27 Sep 2024 15:48:06 -0700 Subject: [PATCH 170/193] Add a named template for generating the Telegraf configmap - Use it to hash the configmap in the deployment to trigger the deployment restart --- .../templates/_helpers.tpl | 73 +++++++++++++++++++ .../templates/configmap.yaml | 71 +----------------- .../templates/deployment.yaml | 5 +- 3 files changed, 77 insertions(+), 72 deletions(-) create mode 100644 applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl new file mode 100644 index 0000000000..72e8d824c3 --- /dev/null +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{- define "configmap" -}} +{{- if .value.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sasquatch-telegraf-{{ .key }} + labels: + app.kubernetes.io/name: sasquatch-telegraf + app.kubernetes.io/instance: sasquatch-telegraf-{{ .key }} + app.kubernetes.io/part-of: sasquatch +data: + telegraf.conf: |+ + [agent] + metric_batch_size = {{ default 5000 .value.metric_batch_size }} + metric_buffer_limit = {{ default 100000 .value.metric_buffer_limit }} + collection_jitter = {{ default "0s" .value.collection_jitter | quote }} + flush_interval = {{ default "10s" .value.flush_interval | quote }} + flush_jitter = {{ default "0s" .value.flush_jitter | quote }} + debug = {{ default false .value.debug }} + omit_hostname = true + + [[outputs.influxdb]] + urls = [ + {{ .influxdbUrl | quote }} + ] + database = {{ .value.database | 
quote }} + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[outputs.influxdb]] + namepass = ["telegraf_*"] + urls = [ + {{ .influxdbUrl | quote }} + ] + database = "telegraf" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[inputs.kafka_consumer]] + brokers = [ + "sasquatch-kafka-brokers.sasquatch:9092" + ] + consumer_group = "telegraf-kafka-consumer-{{ .key }}" + sasl_mechanism = "SCRAM-SHA-512" + sasl_password = "$TELEGRAF_PASSWORD" + sasl_username = "telegraf" + data_format = "avro" + avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" + avro_timestamp = {{ default "private_efdStamp" .value.timestamp_field | quote }} + avro_timestamp_format = {{ default "unix" .value.timestamp_format | quote }} + avro_union_mode = {{ default "nullable" .value.union_mode | quote }} + avro_field_separator = {{ default "" .value.union_field_separator | quote }} + {{- if .value.fields }} + avro_fields = {{ .value.fields }} + {{- end }} + {{- if .value.tags }} + avro_tags = {{ .value.tags }} + {{- end }} + topic_regexps = {{ .value.topicRegexps }} + offset = {{ default "oldest" .value.offset | quote }} + precision = {{ default "1us" .value.precision | quote }} + max_processing_time = {{ default "5s" .value.max_processing_time | quote }} + consumer_fetch_default = {{ default "20MB" .value.consumer_fetch_default | quote }} + max_undelivered_messages = {{ default 10000 .value.max_undelivered_messages }} + compression_codec = {{ default 3 .value.compression_codec }} + + [[inputs.internal]] + name_prefix = "telegraf_" + collect_memstats = true + tags = { instance = "{{ .key }}" } +{{- end }} +{{- end }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 5be588773d..6f70b74961 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -1,74 +1,5 @@ {{- range $key, $value := .Values.kafkaConsumers }} -{{- if $value.enabled }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: sasquatch-telegraf-{{ $key }} - labels: - app.kubernetes.io/name: sasquatch-telegraf - app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} - app.kubernetes.io/part-of: sasquatch -data: - telegraf.conf: |+ - [agent] - metric_batch_size = {{ default 5000 $value.metric_batch_size }} - metric_buffer_limit = {{ default 100000 $value.metric_buffer_limit }} - collection_jitter = {{ default "0s" $value.collection_jitter | quote }} - flush_interval = {{ default "10s" $value.flush_interval | quote }} - flush_jitter = {{ default "0s" $value.flush_jitter | quote }} - debug = {{ default false $value.debug }} - omit_hostname = true - [[outputs.influxdb]] - urls = [ - {{ $.Values.influxdb.url | quote }} - ] - database = {{ $value.database | quote }} - username = "${INFLUXDB_USER}" - password = "${INFLUXDB_PASSWORD}" +{{ include "configmap" (dict "key" $key "value" $value "influxdbUrl" $.Values.influxdb.url ) }} - [[outputs.influxdb]] - namepass = ["telegraf_*"] - urls = [ - {{ $.Values.influxdb.url | quote }} - ] - database = "telegraf" - username = "${INFLUXDB_USER}" - password = "${INFLUXDB_PASSWORD}" - - [[inputs.kafka_consumer]] - brokers = [ - "sasquatch-kafka-brokers.sasquatch:9092" - ] - consumer_group = "telegraf-kafka-consumer-{{ $key }}" - sasl_mechanism = "SCRAM-SHA-512" - sasl_password = "$TELEGRAF_PASSWORD" - 
sasl_username = "telegraf" - data_format = "avro" - avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" - avro_timestamp = {{ default "private_efdStamp" $value.timestamp_field | quote }} - avro_timestamp_format = {{ default "unix" $value.timestamp_format | quote }} - avro_union_mode = {{ default "nullable" $value.union_mode | quote }} - avro_field_separator = {{ default "" $value.union_field_separator | quote }} - {{ with $value.fields }} - avro_fields = {{ $value.fields }} - {{ end }} - {{ with $value.tags }} - avro_tags = {{ $value.tags }} - {{ end }} - topic_regexps = {{ $value.topicRegexps }} - offset = {{ default "oldest" $value.offset | quote }} - precision = {{ default "1us" $value.precision | quote }} - max_processing_time = {{ default "5s" $value.max_processing_time | quote }} - consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} - max_undelivered_messages = {{ default 10000 $value.max_undelivered_messages }} - compression_codec = {{ default 3 $value.compression_codec }} - - [[inputs.internal]] - name_prefix = "telegraf_" - collect_memstats = true - tags = { instance = "{{ $key }}" } - -{{- end }} {{- end }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml index 5408f4f93f..f8117c8900 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml @@ -18,10 +18,11 @@ spec: metadata: labels: app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} - {{- if $.Values.podAnnotations }} annotations: + checksum/config: {{ include "configmap" (dict "key" $key "value" $value "influxdbUrl" $.Values.influxdb.url ) | sha256sum }} + {{- if $.Values.podAnnotations }} {{- toYaml $.Values.podAnnotations | nindent 8 }} - {{- end }} + {{- end }} spec: securityContext: runAsNonRoot: true From ff7d4f9001f12c9b2e4f8026afc53259c6d119bf Mon Sep 17 00:00:00 2001 From: Valerie Becker Date: Mon, 30 Sep 2024 08:04:42 -0700 Subject: [PATCH 171/193] Add ConsDb to Tucson TestStand environment --- environments/values-tucson-teststand.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 0bd875b947..ae6ec29038 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -11,6 +11,7 @@ namespaceLabels: applications: argo-workflows: true + consdb: true exposurelog: true mobu: true narrativelog: true From 53be7755cf78a7cbef953517ff833aba7a98064a Mon Sep 17 00:00:00 2001 From: Valerie Becker Date: Mon, 30 Sep 2024 08:11:21 -0700 Subject: [PATCH 172/193] Add configuration for ConsDb on TTS --- .../consdb/values-tucson-teststand.yaml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 applications/consdb/values-tucson-teststand.yaml diff --git a/applications/consdb/values-tucson-teststand.yaml b/applications/consdb/values-tucson-teststand.yaml new file mode 100644 index 0000000000..21997de89d --- /dev/null +++ b/applications/consdb/values-tucson-teststand.yaml @@ -0,0 +1,21 @@ +db: + user: "oods" + host: "postgresdb01.tu.lsst.org" + database: "exposurelog" +lfa: + s3EndpointUrl: "https://s3.tu.lsst.org" +hinfo: + latiss: + enable: true + tag: "tickets-DM-44551" + logConfig: "consdb.hinfo=DEBUG" + lsstcomcam: + enable: true + tag: "tickets-DM-44551" + logConfig: 
"consdb.hinfo=DEBUG" + lsstcam: + enable: false + tag: "tickets-DM-44551" +pq: + image: + tag: "main" From 9328a5c70160e768593f3ec5f985dea89646df4b Mon Sep 17 00:00:00 2001 From: Valerie Becker Date: Mon, 30 Sep 2024 08:12:17 -0700 Subject: [PATCH 173/193] Add secrets update for ConsDb --- applications/consdb/secrets.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/applications/consdb/secrets.yaml b/applications/consdb/secrets.yaml index 61468589eb..1727b68e71 100644 --- a/applications/consdb/secrets.yaml +++ b/applications/consdb/secrets.yaml @@ -6,7 +6,12 @@ consdb-password: key: consdb-password oods-password: description: >- - PostgreSQL password for the OODS user Butler database. + PostgreSQL password for the OODS user Butler database. lfa-password: description: >- LFA password +exposurelog-password: + description: "Password for the TTS where we use exposurelog database." + copy: + application: exposure-log + key: exposurelog_password \ No newline at end of file From c3e6e352e9c7ec97c419b40fa726282b71bf8d4e Mon Sep 17 00:00:00 2001 From: Valerie Becker Date: Mon, 30 Sep 2024 08:13:09 -0700 Subject: [PATCH 174/193] Add secrets update for ConsDb --- applications/consdb/secrets.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/consdb/secrets.yaml b/applications/consdb/secrets.yaml index 1727b68e71..99a8f6ba13 100644 --- a/applications/consdb/secrets.yaml +++ b/applications/consdb/secrets.yaml @@ -14,4 +14,4 @@ exposurelog-password: description: "Password for the TTS where we use exposurelog database." copy: application: exposure-log - key: exposurelog_password \ No newline at end of file + key: exposurelog_password From e3052ea8294c0fe6548fe78e641437ef5908d5ef Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Mon, 16 Sep 2024 14:15:14 -0700 Subject: [PATCH 175/193] Update Kafka to 3.7, add kraft, and remove zookeeper We have updated kafka to version 3.7 and swapped to using kraft rather than zookeeper. --- applications/alert-stream-broker/README.md | 9 +- .../charts/alert-stream-broker/README.md | 8 +- .../alert-stream-broker/templates/kafka.yaml | 113 +++++++++--------- .../charts/alert-stream-broker/values.yaml | 34 ++++-- .../alert-stream-schema-registry/README.md | 1 + .../templates/schema-registry-server.yaml | 3 +- .../alert-stream-schema-registry/values.yaml | 2 + .../values-usdfdev-alert-stream-broker.yaml | 21 ++-- 8 files changed, 106 insertions(+), 85 deletions(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index 7cf49c19d9..c8043c152d 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -72,12 +72,17 @@ Alert transmission to community brokers | alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | | alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. 
| +| alert-stream-broker.kafkaController.enabled | bool | `false` | Enable Kafka Controller | +| alert-stream-broker.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | +| alert-stream-broker.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | +| alert-stream-broker.kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | alert-stream-broker.kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. | | alert-stream-broker.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | alert-stream-broker.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. | | alert-stream-broker.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | alert-stream-broker.kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | alert-stream-broker.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| alert-stream-broker.kraft | bool | `true` | | | alert-stream-broker.maxBytesRetained | string | `"100000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | | alert-stream-broker.maxMillisecondsRetained | string | `"5259492000"` | Maximum amount of time to save alerts in the replay topic, in milliseconds. Default is 7 days (604800000). | | alert-stream-broker.nameOverride | string | `""` | | @@ -95,10 +100,8 @@ Alert transmission to community brokers | alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault | -| alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | -| alert-stream-broker.zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| alert-stream-broker.zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | | alert-stream-schema-registry.clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. | +| alert-stream-schema-registry.compatibilityLevel | string | `"None"` | | | alert-stream-schema-registry.hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. | | alert-stream-schema-registry.name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. | | alert-stream-schema-registry.port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. 
| diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 4c6a0bcc4f..c44bd492cf 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -29,12 +29,17 @@ Kafka broker cluster for distributing alerts | kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | +| kafkaController.enabled | bool | `false` | Enable Kafka Controller | +| kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | +| kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | +| kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. | | kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. | | kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| kraft | bool | `true` | | | maxBytesRetained | string | `"100000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | | maxMillisecondsRetained | string | `"5259492000"` | Maximum amount of time to save alerts in the replay topic, in milliseconds. Default is 7 days (604800000). | | nameOverride | string | `""` | | @@ -52,6 +57,3 @@ Kafka broker cluster for distributing alerts | users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | -| zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | -| zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. 
| diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml index a226042239..2ca5b98df1 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml @@ -1,7 +1,60 @@ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: controller + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + replicas: {{ .Values.kafka.replicas }} + roles: + - controller + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: {{ .Values.kafkaController.storage.size }} + class: {{ .Values.kafkaController.storage.storageClassName }} + deleteClaim: false + {{- with .Values.kafkaController.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: kafka + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} + annotations: + strimzi.io/next-node-ids: "[0-99]" +spec: + replicas: {{ .Values.kafka.replicas }} + roles: + - broker + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: {{ .Values.kafka.storage.size }} + {{- if .Values.kafka.storage.storageClassName }} + class: {{ .Values.kafka.storage.storageClassName }} + {{- end}} + deleteClaim: false + {{- with .Values.kafka.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +--- apiVersion: kafka.strimzi.io/{{ .Values.strimziAPIVersion }} kind: Kafka metadata: name: {{ .Values.cluster.name }} + annotations: + strimzi.io/kraft: enabled + strimzi.io/node-pools: enabled spec: {{- if .Values.kafkaExporter.enabled }} kafkaExporter: @@ -85,14 +138,15 @@ spec: {{- if .Values.kafka.externalListener.brokers }} brokers: - {{- range $idx, $broker := .Values.kafka.externalListener.brokers }} - - broker: {{ $idx }} + {{- range $broker := .Values.kafka.externalListener.brokers }} + - broker: {{ $broker.broker }} loadBalancerIP: {{ $broker.ip }} advertisedHost: {{ $broker.host }} - annotations: {{ toYaml $broker.annotations | nindent 16 }} - {{- end }} + advertisedPort: 9094 + annotations: + annotations: {{ toYaml $broker.annotations | nindent 16 }} + {{- end }} {{- end }} - {{- if and (.Values.kafka.externalListener.tls.enabled) (.Values.kafka.externalListener.bootstrap.host) }} brokerCertChainAndKey: secretName: {{ .Values.cluster.name }}-external-tls @@ -114,8 +168,6 @@ spec: transaction.state.log.replication.factor: 3 transaction.state.log.min.isr: 2 message.max.bytes: 4194304 # 8 Megabytes. For testing purposes only. 
- log.message.format.version: {{ .Values.kafka.logMessageFormatVersion }} - inter.broker.protocol.version: {{ .Values.kafka.interBrokerProtocolVersion }} ssl.client.auth: required {{- range $key, $value := .Values.kafka.config }} {{ $key }}: {{ $value }} @@ -133,53 +185,6 @@ spec: class: {{ .Values.kafka.storage.storageClassName }} deleteClaim: false - template: - pod: - {{- if .Values.kafka.nodePool.tolerations }} - tolerations: - {{- range $tol := .Values.kafka.nodePool.tolerations }} - - key: {{ $tol.key }} - operator: "Equal" - value: {{ $tol.value }} - effect: {{ $tol.effect }} - {{- end }} - {{- end }} - - {{- if .Values.kafka.nodePool.affinities }} - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - {{- range $affinity := .Values.kafka.nodePool.affinities }} - - weight: 1 - preference: - matchExpressions: - - key: {{ $affinity.key }} - operator: In - values: [{{ $affinity.value }}] - {{- end }} - {{- end }} - - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/name" - operator: In - values: - - kafka - topologyKey: "kubernetes.io/hostname" - - zookeeper: - replicas: {{ .Values.zookeeper.replicas }} - storage: - # Note that storage is configured per replica. If there are 3 replicas, - # each will get its own PersistentVolumeClaim for the configured size. - type: persistent-claim - size: {{ .Values.zookeeper.storage.size }} - class: {{ .Values.zookeeper.storage.storageClassName }} - deleteClaim: false - template: pod: {{- if .Values.kafka.nodePool.tolerations }} diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml index 1757660413..8c5f950fa4 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml @@ -121,17 +121,6 @@ users: # matches. groups: ["rubin-testing"] - -zookeeper: - # -- Number of Zookeeper replicas to run. - replicas: 3 - - storage: - # -- Size of the backing storage disk for each of the Zookeeper instances. - size: 1000Gi - # -- Name of a StorageClass to use when requesting persistent volumes. - storageClassName: standard - tls: subject: # -- Organization to use in the 'Subject' field of the broker's TLS certificate. @@ -149,6 +138,29 @@ fullnameOverride: "" nameOverride: "" +kraft: true + +kafkaController: + # -- Enable Kafka Controller + enabled: false + + storage: + # -- Size of the backing storage disk for each of the Kafka controllers + size: 20Gi + + # -- Name of a StorageClass to use when requesting persistent volumes + storageClassName: "" + + # -- Kubernetes requests and limits for the Kafka Controller + # @default -- See `values.yaml` + resources: + requests: + memory: 32Gi + cpu: "4" + limits: + memory: 64Gi + cpu: "8" + # -- Topic used to send test alerts. 
testTopicName: alert-stream-test diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md index a31ce78c20..5e7df966e2 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md @@ -7,6 +7,7 @@ Confluent Schema Registry for managing schema versions for the Alert Stream | Key | Type | Default | Description | |-----|------|---------|-------------| | clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. | +| compatibilityLevel | string | `"None"` | | | hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. | | name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. | | port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. | diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml b/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml index f97585bec4..ce73059575 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml @@ -9,4 +9,5 @@ metadata: revision: "1" spec: strimzi-version: {{ .Values.strimziAPIVersion }} - listener: internal \ No newline at end of file + listener: internal + compatibilityLevel: none \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml b/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml index e77f15f03c..ab28d9c736 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml @@ -16,6 +16,8 @@ clusterName: alert-broker # -- Name of the topic used by the Schema Registry to store data. schemaTopic: registry-schemas +compatibilityLevel: None + # -- Hostname for an ingress which sends traffic to the Schema Registry. hostname: usdf-alert-schemas-dev.slac.stanford.edu diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index f779daf70c..c1aa6c112e 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -2,58 +2,53 @@ alert-stream-broker: cluster: name: "alert-broker" - zookeeper: - storage: - size: 1000Gi - storageClassName: wekafs--sdf-k8s01 - kafka: version: 3.7.0 - # -- Encoding version for messages, see - # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. - logMessageFormatVersion: 3.4 - # -- Version of the protocol for inter-broker communication, see - # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. - interBrokerProtocolVersion: 3.4 replicas: 6 prometheusScrapingEnabled: true # Addresses based on the state as of 2021-12-02; these were assigned by - # Google and now we're pinning them. 
+ # Square and now we're pinning them. externalListener: tls: enabled: false bootstrap: host: usdf-alert-stream-dev.lsst.cloud - ip: "134.79.23.215" + ip: "" annotations: metallb.universe.tf/address-pool: 'sdf-dmz' brokers: - host: usdf-alert-stream-dev-broker-0.lsst.cloud ip: "134.79.23.214" + broker: 6 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-1.lsst.cloud ip: "134.79.23.216" + broker: 7 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-2.lsst.cloud ip: "134.79.23.218" + broker: 8 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-3.lsst.cloud ip: "134.79.23.220" + broker: 9 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-4.lsst.cloud ip: "134.79.23.217" + broker: 10 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-5.lsst.cloud ip: "134.79.23.219" + broker: 11 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' From 2d5a5e0cf0a249ad43eccb9fc81d01ce8c611d91 Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Thu, 26 Sep 2024 16:40:56 -0700 Subject: [PATCH 176/193] Add LATISS topic Add LATISS topic for LATISS production alerts. --- .../templates/kafka-topics.yaml | 26 +++++++++++++++++-- .../values-usdfdev-alert-stream-broker.yaml | 14 +++++++--- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml index 98717b9a4a..26c74abe19 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml @@ -26,6 +26,7 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. # To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer @@ -45,6 +46,7 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. # To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer @@ -64,11 +66,31 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} - compression.type: {{ .Values.devTopicCompression}} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. 
# To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer # timestamp instead message.timestamp.type: 'LogAppendTime' partitions: {{ .Values.devTopicPartitions }} - replicas: {{ .Values.devTopicReplicas }} \ No newline at end of file + replicas: {{ .Values.devTopicReplicas }} +--- +apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}" +kind: KafkaTopic +metadata: + labels: + strimzi.io/cluster: "{{ .Values.clusterName }}" + name: "{{ .Values.latissTopicName}}" +spec: + config: + cleanup.policy: "delete" + retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days + retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} + # The default timestamp is the creation time of the alert. + # To get the ingestion rate, we need this to be the log + # append time, and the header will contain the producer + # timestamp instead + message.timestamp.type: 'LogAppendTime' + partitions: {{ .Values.latissTopicPartitions }} + replicas: {{ .Values.latissTopicReplicas }} \ No newline at end of file diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index c1aa6c112e..0f56055671 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -10,7 +10,7 @@ alert-stream-broker: prometheusScrapingEnabled: true - # Addresses based on the state as of 2021-12-02; these were assigned by + # Addresses based on the state as of 2023; these were assigned by # Square and now we're pinning them. externalListener: tls: @@ -106,15 +106,23 @@ alert-stream-broker: groups: ["pittgoogle-idfint"] testTopicName: alert-stream-test - simulatedTopicName: alerts-simulated topicPartitions: 400 topicReplicas: 1 + + simulatedTopicName: alerts-simulated simulatedTopicPartitions: 45 simulatedTopicReplicas: 1 + devTopicName: dev-topic devTopicPartitions: 10 devTopicReplicas: 1 - devTopicCompression: lz4 + + latissTopicName: latiss-alerts + latissTopicPartitions: 45 + latissTopicReplicas: 1 + + # Compression set to snappy to balance alert packet compression speed and size. + topicCompression: snappy alert-stream-schema-registry: hostname: "usdf-alert-schemas-dev.slac.stanford.edu" From e3491f4d73b8d6ca7d54581fab3596fe7a3f82d8 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 24 Sep 2024 14:10:16 -0700 Subject: [PATCH 177/193] Summit: Update nublado mounts for LSSTComCam. 
--- applications/nublado/values-summit.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 6bede5f34d..360e229a8f 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -58,8 +58,8 @@ controller: - name: "lsstcomcam" source: type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/repo/LSSTComCam" + server: "nfs3.cp.lsst.org" - name: "lsstcam" source: type: "nfs" @@ -78,8 +78,8 @@ controller: - name: "lsstdata-comcam" source: type: "nfs" - serverPath: "/lsstdata" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/lsstdata" + server: "nfs3.cp.lsst.org" - name: "lsstdata-auxtel" source: type: "nfs" @@ -93,8 +93,8 @@ controller: - name: "lsstdata-base-comcam" source: type: "nfs" - serverPath: "/lsstdata/base/comcam" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/lsstdata/base/comcam" + server: "nfs3.cp.lsst.org" - name: "lsstdata-base-auxtel" source: type: "nfs" From cc608fe27e4b99055f2890e5041d02aa36e80f26 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 1 Oct 2024 10:25:19 -0700 Subject: [PATCH 178/193] Fix ComCam mounts in UWS. --- applications/uws/values-summit.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/uws/values-summit.yaml b/applications/uws/values-summit.yaml index 41410dc51c..47341768f5 100644 --- a/applications/uws/values-summit.yaml +++ b/applications/uws/values-summit.yaml @@ -33,10 +33,10 @@ uws-api-server: subPath: "" readOnly: false - name: repo-comcam - server: comcam-archiver.cp.lsst.org + server: nfs3.cp.lsst.org claimName: repo-comcam-pvc mountPath: "/repo/LSSTComCam" - exportPath: "/repo/LSSTComCam" + exportPath: "/comcam/repo/LSSTComCam" subPath: "" readOnly: false - name: data-auxtel @@ -47,9 +47,9 @@ uws-api-server: subPath: "" readOnly: true - name: data-comcam - server: comcam-archiver.cp.lsst.org + server: nfs3.cp.lsst.org claimName: data-comcam-pvc mountPath: "/data/lsstdata/base/comcam" - exportPath: "/lsstdata/base/comcam" + exportPath: "/comcam/lsstdata/base/comcam" subPath: "" readOnly: true From 3c2bbdf5064adfc2600a843ae36a8ce19224c378 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Tue, 1 Oct 2024 16:05:17 -0700 Subject: [PATCH 179/193] Bump nublado version --- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 1b0e3dad86..430008b4fc 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 7.0.0 +appVersion: 7.2.0 dependencies: - name: jupyterhub From 72cb2aeabf05ba66ea24fbfadc0e29177fe94f90 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 1 Oct 2024 18:59:36 -0300 Subject: [PATCH 180/193] rapid-analysis: add application to summit environment --- applications/rapid-analysis/Chart.yaml | 4 + applications/rapid-analysis/README.md | 50 ++++ applications/rapid-analysis/secrets.yaml | 8 + .../rapid-analysis/templates/_helpers.tpl | 124 ++++++++++ .../rapid-analysis/templates/configmap.yaml | 7 + .../rapid-analysis/templates/deployment.yaml | 224 +++++++++++++++++ .../templates/gather-rollup-set.yaml | 231 ++++++++++++++++++ 
.../templates/gather2a-set.yaml | 231 ++++++++++++++++++ .../templates/mountpoint-pvc.yaml | 26 ++ .../templates/redis-service.yaml | 21 ++ .../templates/redis-statefulset.yaml | 90 +++++++ .../templates/vault-secret.yaml | 44 ++++ .../rapid-analysis/templates/worker-set.yaml | 231 ++++++++++++++++++ .../rapid-analysis/values-summit.yaml | 151 ++++++++++++ applications/rapid-analysis/values.yaml | 130 ++++++++++ docs/applications/rapid-analysis/index.rst | 29 +++ docs/applications/rapid-analysis/values.md | 12 + docs/applications/rubin.rst | 1 + .../applications/rubin/rapid-analysis.yaml | 34 +++ environments/values-summit.yaml | 1 + 20 files changed, 1649 insertions(+) create mode 100644 applications/rapid-analysis/Chart.yaml create mode 100644 applications/rapid-analysis/README.md create mode 100644 applications/rapid-analysis/secrets.yaml create mode 100644 applications/rapid-analysis/templates/_helpers.tpl create mode 100644 applications/rapid-analysis/templates/configmap.yaml create mode 100644 applications/rapid-analysis/templates/deployment.yaml create mode 100644 applications/rapid-analysis/templates/gather-rollup-set.yaml create mode 100644 applications/rapid-analysis/templates/gather2a-set.yaml create mode 100644 applications/rapid-analysis/templates/mountpoint-pvc.yaml create mode 100644 applications/rapid-analysis/templates/redis-service.yaml create mode 100644 applications/rapid-analysis/templates/redis-statefulset.yaml create mode 100644 applications/rapid-analysis/templates/vault-secret.yaml create mode 100644 applications/rapid-analysis/templates/worker-set.yaml create mode 100644 applications/rapid-analysis/values-summit.yaml create mode 100644 applications/rapid-analysis/values.yaml create mode 100644 docs/applications/rapid-analysis/index.rst create mode 100644 docs/applications/rapid-analysis/values.md create mode 100644 environments/templates/applications/rubin/rapid-analysis.yaml diff --git a/applications/rapid-analysis/Chart.yaml b/applications/rapid-analysis/Chart.yaml new file mode 100644 index 0000000000..c4a7da146e --- /dev/null +++ b/applications/rapid-analysis/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: rapid-analysis +version: 1.0.0 +description: A Helm chart for deploying the Rapid Analysis services. diff --git a/applications/rapid-analysis/README.md b/applications/rapid-analysis/README.md new file mode 100644 index 0000000000..089df8b114 --- /dev/null +++ b/applications/rapid-analysis/README.md @@ -0,0 +1,50 @@ +# rapid-analysis + +A Helm chart for deploying the Rapid Analysis services. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | This specifies the scheduling constraints of the pod. | +| butlerSecret | object | `{}` | This section allows for specification of Butler secret information. If this section is used, it must contain the following attributes: _key_ (The vault key for the Butler secret), _containerPath_ (The directory location for the Butler secret), _dbUser_ (The username for the Butler backend database) | +| credentialFile | string | `""` | The name of the expected credential file for the broadcasters | +| credentialSecretsPath | string | `""` | The key for the credentials including any sub-paths. | +| env | object | `{}` | This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). NOTE: RUN_ARG is taken care of by the chart using _script_. | +| envSecrets | list | `[]` | This section holds specifications for secret injection. 
If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| fullnameOverride | string | `""` | Specify the deployed application name specifically. Overrides all other names. | +| gather2aSet | object | `{}` | This configures a StatefulSet used for visit-level gather processing. | +| gatherRollupSet | object | `{}` | This configures a StatefulSet used for night-summary rollup. | +| image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment. | +| image.repository | string | `"ts-dockerhub.lsst.org/rubintv-broadcaster"` | The Docker registry name for the container image. | +| image.tag | string | `"develop"` | The tag of the container image to use. | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| location | string | `""` | Provide the location where the system is running. | +| nameOverride | string | `""` | Adds an extra string to the release name. | +| namespace | string | `"rapid-analysis"` | This is the namespace where the applications will be deployed. | +| nfsMountpoint | list | `[]` | This section holds the information necessary to create a NFS mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _containerPath_ (The path inside the container to mount), _readOnly_ (This sets if the NFS mount is read only or read/write), _server_ (The hostname of the NFS server), _serverPath_ (The path exported by the NFS server) | +| nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod. | +| podAnnotations | object | `{}` | This allows the specification of pod annotations. | +| pullSecretsPath | string | `""` | | +| pvcMountpoint | list | `[]` | This section holds information about existing volume claims. If the section is used, each object listed can have the following attributes defined: _name_ (The name ot the persistent volume), _containerPath_ (The path inside the container to mount), _subPath_ (persistent volume subpath, optional) | +| pvcMountpointClaim | list | `[]` | This section holds the information necessary to claim persistent volumes. If the section is used, each object listed can have the following attributes defined: _name_ (The name ot the persistent volume), _containerPath_ (The path inside the container to mount), _subPath_ (persistent volume subpath, optional) | +| redis.affinity | object | `{}` | Affinity rules for the redis pods | +| redis.enabled | bool | `false` | This specifies whether to use redis or not. | +| redis.env | object | `{}` | This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). NOTE: RUN_ARG is taken care of by the chart using _script_. | +| redis.envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. 
Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| redis.image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment. | +| redis.image.repository | string | `"docker.io/redis"` | The Docker registry name for the redis container image. | +| redis.image.tag | string | `"latest"` | The tag of the redis container image to use. | +| redis.nodeSelector | object | `{}` | Node selection rules for the redis pods | +| redis.resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the redis container. | +| redis.storage.classname | string | `nil` | | +| redis.storage.request | string | `"1Gi"` | The size of the storage request. | +| redis.tolerations | list | `[]` | Toleration specifications for the redis pods | +| resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the container. | +| rubinTvSecretsPath | string | `""` | | +| scripts | object | `{}` | List of script objects to run for the broadcaster. This section MUST have the following attribute specified for each entry. _name_ (The full path for the script) The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| securityContext | object | `{}` | This section allows for specification of security context information. If the section is used, at least one of the following attributes must be specified. _uid_ (User id to run application as), _gid_ (Group id of the user that runs the application), _fid_ (File system context user id), | +| siteTag | string | `""` | A special tag for letting the scripts know where they are running. | +| tolerations | list | `[]` | This specifies the tolerations of the pod for any system taints. | +| vaultPrefixPath | string | `""` | The Vault prefix path | +| workerSet | object | `{}` | This configures a StatefulSet used for single frame workers. | diff --git a/applications/rapid-analysis/secrets.yaml b/applications/rapid-analysis/secrets.yaml new file mode 100644 index 0000000000..eda73c3be5 --- /dev/null +++ b/applications/rapid-analysis/secrets.yaml @@ -0,0 +1,8 @@ +redis-password: + description: >- + Password used to authenticate rubintv worker pods to their shared + redis pod. If this secret changes, both the Redis server and all + worker pods will require a restart. + generate: + type: + password diff --git a/applications/rapid-analysis/templates/_helpers.tpl b/applications/rapid-analysis/templates/_helpers.tpl new file mode 100644 index 0000000000..fe0a7eaf8e --- /dev/null +++ b/applications/rapid-analysis/templates/_helpers.tpl @@ -0,0 +1,124 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "rapid-analysis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "rapid-analysis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "rapid-analysis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "rapid-analysis.labels" -}} +helm.sh/chart: {{ include "rapid-analysis.chart" . }} +{{ include "rapid-analysis.selectorLabels" . }} +{{- end }} + +{{/* +Script name +*/}} +{{- define "rapid-analysis.scriptName" -}} +{{- regexSplit "/" .Values.script.name -1 | last | trimSuffix ".py" | kebabcase }} +{{- end }} + +{{/* +Deployment name +*/}} +{{- define "rapid-analysis.deploymentName" -}} +{{- $name := regexSplit "/" .Values.script.name -1 | last | trimSuffix ".py" | kebabcase }} +{{- $cameraName := regexSplit "/" .Values.script.name -1 | rest | first | lower }} +{{- $camera := "" }} +{{- if eq $cameraName "auxtel" }} +{{- $camera = "at"}} +{{- else if eq $cameraName "comcam" }} +{{- $camera = "cc"}} +{{- else }} +{{- $camera = $cameraName}} +{{- end }} +{{- printf "s-%s-%s" $camera $name }} +{{- end }} + + +{{/* +Selector labels +*/}} +{{- define "rapid-analysis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "rapid-analysis.deploymentName" . }} +app.kubernetes.io/instance: {{ include "rapid-analysis.name" . }} +{{- $values := regexSplit "/" .Values.script.name -1 }} +{{- if eq 1 (len $values) }} +all: misc +{{- else }} +{{- $all_label := lower (index $values 1) }} +{{- $script := index $values 2 }} +{{- if contains "Isr" $script }} +isr: {{ $all_label }} +{{- end }} +all: {{ $all_label }} +{{- if has $all_label (list "auxtel" "comcam" "bot" "ts8") }} +camera: {{ $all_label }} +{{- else }} +{{- if contains "StarTracker" $script }} +camera: startracker +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for redis. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rapid-analysis.redis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-redis" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-redis" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels - redis +*/}} +{{- define "rapid-analysis.redis.labels" -}} +helm.sh/chart: {{ include "rapid-analysis.chart" . }} +{{ include "rapid-analysis.redis.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels - redis +*/}} +{{- define "rapid-analysis.redis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +app.kubernetes.io/instance: {{ include "rapid-analysis.redis.fullname" . 
}} +{{- end }} diff --git a/applications/rapid-analysis/templates/configmap.yaml b/applications/rapid-analysis/templates/configmap.yaml new file mode 100644 index 0000000000..65aa6db601 --- /dev/null +++ b/applications/rapid-analysis/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: env-configmap + namespace: {{ .Values.namespace }} +data: + GOOGLE_APPLICATION_CREDENTIALS: "/etc/rubintv/creds/{{ .Values.credentialFile }}" diff --git a/applications/rapid-analysis/templates/deployment.yaml b/applications/rapid-analysis/templates/deployment.yaml new file mode 100644 index 0000000000..1f5e13bd87 --- /dev/null +++ b/applications/rapid-analysis/templates/deployment.yaml @@ -0,0 +1,224 @@ +{{ range $script := .Values.scripts }} +{{ $_ := set $.Values "script" $script }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }} + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} {{ $.Values.siteTag }} + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- 
end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations 
}} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/gather-rollup-set.yaml b/applications/rapid-analysis/templates/gather-rollup-set.yaml new file mode 100644 index 0000000000..ac8958cddf --- /dev/null +++ b/applications/rapid-analysis/templates/gather-rollup-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.gatherRollupSet }} +{{ $script := $.Values.gatherRollupSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gatherrollupset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + 
mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git 
a/applications/rapid-analysis/templates/gather2a-set.yaml b/applications/rapid-analysis/templates/gather2a-set.yaml new file mode 100644 index 0000000000..2c1fdbee4f --- /dev/null +++ b/applications/rapid-analysis/templates/gather2a-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.gather2aSet }} +{{ $script := $.Values.gather2aSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gather2aset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end 
}} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/mountpoint-pvc.yaml b/applications/rapid-analysis/templates/mountpoint-pvc.yaml new file mode 100644 index 
0000000000..4cf1a55df3 --- /dev/null +++ b/applications/rapid-analysis/templates/mountpoint-pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.pvcMountpointClaim }} +{{- range $values := .Values.pvcMountpointClaim }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ $values.name }} + namespace: {{ $.Values.namespace }} + {{- if $values.ids }} + annotations: + {{- if $values.ids.uid }} + pv.beta.kubernetes.io/uid: "{{ $values.ids.uid }}" + {{- end }} + {{- if $values.ids.gid }} + pv.beta.kubernetes.io/gid: "{{ $values.ids.gid }}" + {{- end }} + {{- end }} +spec: + accessModes: + - {{ $values.accessMode | quote }} + resources: + requests: + storage: {{ $values.claimSize }} + storageClassName: {{ $values.name }} +{{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/redis-service.yaml b/applications/rapid-analysis/templates/redis-service.yaml new file mode 100644 index 0000000000..0ac2c01ced --- /dev/null +++ b/applications/rapid-analysis/templates/redis-service.yaml @@ -0,0 +1,21 @@ +{{- if .Values.redis.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: redis-service + namespace: {{ .Values.namespace }} + {{- with $.Values.redis.serviceAnnotations }} + annotations: + {{- toYaml $.Values.redis.serviceAnnotations | nindent 4 }} + {{- end }} +spec: + type: LoadBalancer + internalTrafficPolicy: Cluster + selector: + app.kubernetes.io/instance: {{ include "rapid-analysis.redis.fullname" . }} + ports: + - name: redis + protocol: TCP + port: {{ .Values.redis.port }} + targetPort: {{ .Values.redis.port }} +{{- end }} diff --git a/applications/rapid-analysis/templates/redis-statefulset.yaml b/applications/rapid-analysis/templates/redis-statefulset.yaml new file mode 100644 index 0000000000..224d83c500 --- /dev/null +++ b/applications/rapid-analysis/templates/redis-statefulset.yaml @@ -0,0 +1,90 @@ +{{- if .Values.redis.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: {{ .Values.namespace }} + labels: + {{- include "rapid-analysis.redis.labels" . | nindent 4 }} +spec: + serviceName: redis-service + selector: + matchLabels: + {{- include "rapid-analysis.redis.selectorLabels" . | nindent 6 }} + replicas: {{ .Values.redis.replicas | default 1 }} + template: + metadata: + labels: + {{- include "rapid-analysis.redis.selectorLabels" . 
| nindent 8 }} + spec: + securityContext: + fsGroup: 999 + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 999 + containers: + - name: redis + image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + command: [ "redis-server", "--appendonly", "yes", "--requirepass", "$(REDIS_PASSWORD)" ] + ports: + - containerPort: {{ .Values.redis.port }} + env: + {{- range $env_var, $env_value := .Values.redis.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := .Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + livenessProbe: + exec: + command: + - sh + - '-c' + - 'redis-cli -h $(hostname) -a $(REDIS_PASSWORD) incr health:counter' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 1 + {{- with $.Values.redis.resources }} + resources: + {{- toYaml $.Values.redis.resources | nindent 10 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + volumeMounts: + - mountPath: /data + name: data + {{- with $.Values.redis.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + {{- if $.Values.redis.storage.classname }} + storageClassName: {{ $.Values.redis.storage.classname }} + {{- end }} + resources: + requests: + storage: {{ $.Values.redis.storage.request }} +{{- end }} diff --git a/applications/rapid-analysis/templates/vault-secret.yaml b/applications/rapid-analysis/templates/vault-secret.yaml new file mode 100644 index 0000000000..7b3ccf0a19 --- /dev/null +++ b/applications/rapid-analysis/templates/vault-secret.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: pull-secret + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "pullSecretsPath must be set" .Values.pullSecretsPath }} + type: kubernetes.io/dockerconfigjson +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: google-creds + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "credentialSecretsPath must be set" .Values.credentialSecretsPath }} + type: Opaque +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: butler-secret + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "butlerSecret.key must be set" .Values.butlerSecret.key }} + type: Opaque +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: rubintv-secrets + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . 
}} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "rubinTvSecretsPath must be set" .Values.rubinTvSecretsPath }} + type: Opaque diff --git a/applications/rapid-analysis/templates/worker-set.yaml b/applications/rapid-analysis/templates/worker-set.yaml new file mode 100644 index 0000000000..ad87fbc2b8 --- /dev/null +++ b/applications/rapid-analysis/templates/worker-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.workerSet }} +{{ $script := $.Values.workerSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-workerset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := 
$.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 
8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/values-summit.yaml b/applications/rapid-analysis/values-summit.yaml new file mode 100644 index 0000000000..185b063e84 --- /dev/null +++ b/applications/rapid-analysis/values-summit.yaml @@ -0,0 +1,151 @@ +image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0039 + pullPolicy: Always +location: SUMMIT +env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml +scripts: +- name: summit/auxTel/runBackgroundService.py + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 10G +- name: summit/auxTel/runButlerWatcher.py +- name: summit/auxTel/runCalibrateCcdRunner.py +- name: summit/auxTel/runImExaminer.py +- name: summit/auxTel/runIsrRunner.py +- name: summit/auxTel/runMetadataCreator.py +- name: summit/auxTel/runMetadataServer.py +- name: summit/auxTel/runMonitor.py + resources: + requests: + cpu: 0.5 + memory: 1G + limits: + cpu: 1.0 + memory: 10G +- name: summit/auxTel/runMountTorquePlotter.py +- name: summit/auxTel/runNightReporter.py +- name: summit/auxTel/runSpecExaminer.py + resources: + requests: + cpu: 0.5 + memory: 2G + limits: + cpu: 1.0 + memory: 4G +- name: summit/misc/runAllSky.py + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 2 + memory: 6G +- name: summit/misc/runStarTracker.py +- name: summit/misc/runStarTrackerCatchup.py +- name: summit/misc/runStarTrackerFast.py +- name: summit/misc/runStarTrackerMetadata.py +- name: summit/misc/runStarTrackerNightReport.py +- name: summit/misc/runStarTrackerWide.py +- name: summit/misc/runTmaTelemetry.py +- name: summit/LSSTComCam/runButlerWatcher.py +- name: summit/LSSTComCam/runHeadNode.py +- name: summit/LSSTComCam/runMetadataServer.py +- name: summit/LSSTComCam/runPlotter.py +workerSet: + name: summit/LSSTComCam/runSfmRunner.py + replicas: 36 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G +credentialFile: google_write_creds +pullSecretsPath: pull-secret +rubinTvSecretsPath: rubintv +credentialSecretsPath: rubintv-broadcaster +butlerSecret: + key: butler-secret + containerPath: /home/saluser/.lsst + dbUser: oods +imagePullSecrets: +- name: pull-secret +nfsMountpoint: +- name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS +- name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam +- name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata +- name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata +- name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project +- name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel +- name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam +- name: allsky-data + containerPath: /data/allsky + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/allsky +- name: scratch-shared + containerPath: /scratch + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /scratch/rubintv +resources: + requests: + cpu: 0.5 + memory: 1G + limits: + 
cpu: 1.0 + memory: 2.5G +redis: + enabled: true + port: 6379 + env: + MASTER: true + envSecrets: + - name: REDIS_PASSWORD + secretName: rubintv-secrets + secretKey: redis-password + storage: + classname: rook-ceph-block + request: 10Gi + resources: + requests: + cpu: 100m + memory: 1Gi + limits: + cpu: 1 + memory: 50Gi diff --git a/applications/rapid-analysis/values.yaml b/applications/rapid-analysis/values.yaml new file mode 100644 index 0000000000..7151ddc993 --- /dev/null +++ b/applications/rapid-analysis/values.yaml @@ -0,0 +1,130 @@ +image: + # -- The Docker registry name for the container image. + repository: ts-dockerhub.lsst.org/rubintv-broadcaster + # -- The tag of the container image to use. + tag: develop + # -- The policy to apply when pulling an image for deployment. + pullPolicy: IfNotPresent +# -- This is the namespace where the applications will be deployed. +namespace: rapid-analysis +# -- A special tag for letting the scripts know where they are running. +siteTag: "" +# -- Provide the location where the system is running. +location: "" +# -- List of script objects to run for the broadcaster. +# This section MUST have the following attribute specified for each entry. +# _name_ (The full path for the script) +# The following attributes are optional +# _resources_ (A resource object specification) +# _nodeSelector_ (A node selector object specification) +# _tolerations_ (A list of tolerations) +# _affinity_ (An affinity object specification) +scripts: {} +# -- This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). +# NOTE: RUN_ARG is taken care of by the chart using _script_. +env: {} +# -- This section holds specifications for secret injection. +# If this section is used, each object listed must have the following attributes defined: +# _name_ (The label for the secret), +# _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), +# _secretKey_ (The key in the vault store containing the necessary secret) +envSecrets: [] +# -- The Vault prefix path +vaultPrefixPath: "" +# The key for the pull secrets including any sub-paths. +pullSecretsPath: "" +# Path for the rubin tv specific secrets vault. +rubinTvSecretsPath: "" +# -- This key allows specification of a script to override the entrypoint. +# -- The list of pull secrets needed for the images. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (The label identifying the pull-secret to use) +imagePullSecrets: [] +# -- This section allows for specification of Butler secret information. +# If this section is used, it must contain the following attributes: +# _key_ (The vault key for the Butler secret), +# _containerPath_ (The directory location for the Butler secret), +# _dbUser_ (The username for the Butler backend database) +butlerSecret: {} +# -- This section holds the information necessary to create a NFS mount for the container. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (A label identifier for the mountpoint), +# _containerPath_ (The path inside the container to mount), +# _readOnly_ (This sets if the NFS mount is read only or read/write), +# _server_ (The hostname of the NFS server), +# _serverPath_ (The path exported by the NFS server) +nfsMountpoint: [] +# -- This section holds information about existing volume claims. 
+# If the section is used, each object listed can have the following attributes defined: +# _name_ (The name ot the persistent volume), +# _containerPath_ (The path inside the container to mount), +# _subPath_ (persistent volume subpath, optional) +pvcMountpoint: [] +# -- This section holds the information necessary to claim persistent volumes. +# If the section is used, each object listed can have the following attributes defined: +# _name_ (The name ot the persistent volume), +# _containerPath_ (The path inside the container to mount), +# _subPath_ (persistent volume subpath, optional) +pvcMountpointClaim: [] +# -- The key for the credentials including any sub-paths. +credentialSecretsPath: "" +# -- The name of the expected credential file for the broadcasters +credentialFile: "" +# -- Adds an extra string to the release name. +nameOverride: "" +# -- Specify the deployed application name specifically. Overrides all other names. +fullnameOverride: "" +# -- This allows the specification of pod annotations. +podAnnotations: {} +# -- This allows the specification of resources (CPU, memory) requires to run the container. +resources: {} +# -- This allows the specification of using specific nodes to run the pod. +nodeSelector: {} +# -- This specifies the tolerations of the pod for any system taints. +tolerations: [] +# -- This specifies the scheduling constraints of the pod. +affinity: {} +# -- This section allows for specification of security context information. +# If the section is used, at least one of the following attributes must be specified. +# _uid_ (User id to run application as), +# _gid_ (Group id of the user that runs the application), +# _fid_ (File system context user id), +securityContext: {} +# -- This configures a StatefulSet used for single frame workers. +workerSet: {} +# -- This configures a StatefulSet used for visit-level gather processing. +gather2aSet: {} +# -- This configures a StatefulSet used for night-summary rollup. +gatherRollupSet: {} +redis: + # -- This specifies whether to use redis or not. + enabled: false + image: + # -- The Docker registry name for the redis container image. + repository: docker.io/redis + # -- The tag of the redis container image to use. + tag: latest + # -- The policy to apply when pulling an image for deployment. + pullPolicy: IfNotPresent + # -- This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). + # NOTE: RUN_ARG is taken care of by the chart using _script_. + env: {} + # -- This section holds specifications for secret injection. + # If this section is used, each object listed must have the following attributes defined: + # _name_ (The label for the secret), + # _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), + # _secretKey_ (The key in the vault store containing the necessary secret) + envSecrets: [] + storage: + # str -- The storage class name for the data store request. + classname: + # -- The size of the storage request. + request: 1Gi + # -- This allows the specification of resources (CPU, memory) requires to run the redis container. 
+ resources: {} + # -- Node selection rules for the redis pods + nodeSelector: {} + # -- Toleration specifications for the redis pods + tolerations: [] + # -- Affinity rules for the redis pods + affinity: {} diff --git a/docs/applications/rapid-analysis/index.rst b/docs/applications/rapid-analysis/index.rst new file mode 100644 index 0000000000..fe3d904d0f --- /dev/null +++ b/docs/applications/rapid-analysis/index.rst @@ -0,0 +1,29 @@ +.. px-app:: rapid-analysis + +################################################# +rapid-analysis — Real-time backend of the RubinTV +################################################# + +The Rapid Analysis Framework performes realtime analysis on data from these sources, rendering the outputs destined for RubinTV as PNGs, JPEGs, MP4s, and JSON files, which are put in S3 buckets at the summit and at USDF. +The RubinTV frontend then monitors these buckets and serves these files to users. + +At the summit, the real-time activities currently include: + +.. rst-class:: compact + +- AuxTel observing +- ComCam testing +- All sky camera observations +- StarTracker data taking on the TMA +- TMA testing activities + +.. jinja:: rapid-analysis + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/rapid-analysis/values.md b/docs/applications/rapid-analysis/values.md new file mode 100644 index 0000000000..157ad4ca2c --- /dev/null +++ b/docs/applications/rapid-analysis/values.md @@ -0,0 +1,12 @@ +```{px-app-values} rapid-analysis +``` + +# rapid-analysis Helm values reference + +Helm values reference table for the {px-app}`rapid-analysis` application. + +```{include} ../../../applications/rapid-analysis/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/rubin.rst b/docs/applications/rubin.rst index ec91711ff5..a03aad2b38 100644 --- a/docs/applications/rubin.rst +++ b/docs/applications/rubin.rst @@ -19,6 +19,7 @@ Argo CD project: ``rubin`` obsloctap/index plot-navigator/index production-tools/index + rapid-analysis/index rubintv/index rubintv-dev/index schedview-snapshot/index diff --git a/environments/templates/applications/rubin/rapid-analysis.yaml b/environments/templates/applications/rubin/rapid-analysis.yaml new file mode 100644 index 0000000000..8af8557264 --- /dev/null +++ b/environments/templates/applications/rubin/rapid-analysis.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "rapid-analysis") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "rapid-analysis" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "rapid-analysis" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "rapid-analysis" + server: "https://kubernetes.default.svc" + project: "rubin" + source: + path: "applications/rapid-analysis" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 1077d688a2..ce63e3bd3f 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -14,6 +14,7 @@ applications: nightreport: true nublado: true 
portal: true + rapid-analysis: true rubintv: true rubintv-dev: true sasquatch: true From 7db5ced2bab4e5acb604f9d3d005797fb8de097f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 1 Oct 2024 16:24:26 -0700 Subject: [PATCH 181/193] Makes metric_batch_size=1000 default everywhere --- applications/sasquatch/README.md | 4 ++-- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 2 +- .../charts/telegraf-kafka-consumer/templates/_helpers.tpl | 2 +- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 4 ++-- applications/sasquatch/values-base.yaml | 1 - applications/sasquatch/values-tucson-teststand.yaml | 1 - applications/sasquatch/values-usdfprod.yaml | 1 - 7 files changed, 6 insertions(+), 9 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index aac4c033b9..13cbefb735 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -419,7 +419,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | -| telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer.kafkaConsumers.test.precision | string | "1us" | Data precision. | @@ -456,7 +456,7 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | -| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. 
| | telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer-oss.kafkaConsumers.test.precision | string | "1us" | Data precision. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index e361988887..893371b373 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -27,7 +27,7 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | | kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | | kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | -| kafkaConsumers.test.metric_batch_size | int | 5000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | | kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | kafkaConsumers.test.precision | string | "1us" | Data precision. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl index 72e8d824c3..11dae28e5a 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl @@ -12,7 +12,7 @@ metadata: data: telegraf.conf: |+ [agent] - metric_batch_size = {{ default 5000 .value.metric_batch_size }} + metric_batch_size = {{ default 1000 .value.metric_batch_size }} metric_buffer_limit = {{ default 100000 .value.metric_buffer_limit }} collection_jitter = {{ default "0s" .value.collection_jitter | quote }} flush_interval = {{ default "10s" .value.flush_interval | quote }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 28b3081941..2520358ae6 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -62,8 +62,8 @@ kafkaConsumers: # -- Sends metrics to the output in batches of at most metric_batch_size # metrics. 
- # @default -- 5000 - metric_batch_size: 5000 + # @default -- 1000 + metric_batch_size: 1000 # -- Caches metric_buffer_limit metrics for each output, and flushes this # buffer on a successful write. This should be a multiple of metric_batch_size diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 8bd0138b2b..6b445bdcd9 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -172,7 +172,6 @@ telegraf-kafka-consumer: database: "efd" topicRegexps: | [ "lsst.sal.MTM1M3" ] - metric_batch_size: 2500 debug: true m2: enabled: true diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 1df0bcd307..21dca61f0c 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -116,7 +116,6 @@ telegraf-kafka-consumer: database: "efd" topicRegexps: | [ "lsst.sal.MTM1M3" ] - metric_batch_size: 2500 debug: true m2: enabled: true diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index f210ecb710..d3adca0d93 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -182,7 +182,6 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] - metric_batch_size: 2500 debug: true m2: enabled: true From c54221a403c2731dd7c114f3e42ce8e34e66aaae Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 1 Oct 2024 16:26:30 -0700 Subject: [PATCH 182/193] Use image with the race condition fix everywhere - Also change pull policy to IfNotPresent --- applications/sasquatch/README.md | 12 ++++++------ .../charts/telegraf-kafka-consumer/README.md | 6 +++--- .../charts/telegraf-kafka-consumer/values.yaml | 6 +++--- applications/sasquatch/values-base.yaml | 3 --- applications/sasquatch/values-tucson-teststand.yaml | 3 --- 5 files changed, 12 insertions(+), 18 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 13cbefb735..7de173c6ca 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -403,9 +403,9 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | | telegraf-kafka-consumer.env | list | See `values.yaml` | Telegraf agent enviroment variables | | telegraf-kafka-consumer.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. 
| -| telegraf-kafka-consumer.image.pullPolicy | string | `"Always"` | Image pull policy | -| telegraf-kafka-consumer.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | -| telegraf-kafka-consumer.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| telegraf-kafka-consumer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| telegraf-kafka-consumer.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer.image.tag | string | `"avro-mutex"` | Telegraf image tag | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | @@ -440,9 +440,9 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | | telegraf-kafka-consumer-oss.env | list | See `values.yaml` | Telegraf agent enviroment variables | | telegraf-kafka-consumer-oss.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | -| telegraf-kafka-consumer-oss.image.pullPolicy | string | `"Always"` | Image pull policy | -| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | -| telegraf-kafka-consumer-oss.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| telegraf-kafka-consumer-oss.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer-oss.image.tag | string | `"avro-mutex"` | Telegraf image tag | | telegraf-kafka-consumer-oss.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | telegraf-kafka-consumer-oss.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer-oss.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 893371b373..0be7c27bdb 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -11,9 +11,9 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | | env | list | See `values.yaml` | Telegraf agent enviroment variables | | envFromSecret | string | `""` | Name of the secret with values to be added to the environment. 
| -| image.pullPolicy | string | `"Always"` | Image pull policy | -| image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | -| image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| image.tag | string | `"avro-mutex"` | Telegraf image tag | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 2520358ae6..dd0fc7cb4f 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -5,13 +5,13 @@ enabled: false image: # -- Telegraf image repository - repo: "docker.io/library/telegraf" + repo: "docker.io/lsstsqre/telegraf" # -- Telegraf image tag - tag: "1.30.2-alpine" + tag: "avro-mutex" # -- Image pull policy - pullPolicy: "Always" + pullPolicy: "IfNotPresent" # -- Annotations for telegraf-kafka-consumers pods podAnnotations: {} diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 6b445bdcd9..4440c387ea 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -133,9 +133,6 @@ influxdb: telegraf-kafka-consumer: enabled: true - image: - repo: "docker.io/lsstsqre/telegraf" - tag: "avro-mutex" kafkaConsumers: auxtel: enabled: true diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 21dca61f0c..64f30615a3 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -77,9 +77,6 @@ influxdb: telegraf-kafka-consumer: enabled: true - image: - repo: "docker.io/lsstsqre/telegraf" - tag: "avro-mutex" kafkaConsumers: auxtel: enabled: true From 4f201c16336ad733d13d953876e71aa82f141b34 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Tue, 1 Oct 2024 20:37:42 -0300 Subject: [PATCH 183/193] rapid-analysis: slight typo fix for AWS credentials that comes from rubintv vault --- applications/rapid-analysis/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/rapid-analysis/templates/deployment.yaml b/applications/rapid-analysis/templates/deployment.yaml index 1f5e13bd87..d6a44033ca 100644 --- a/applications/rapid-analysis/templates/deployment.yaml +++ b/applications/rapid-analysis/templates/deployment.yaml @@ -138,7 +138,7 @@ spec: cp -RL /secrets-raw/* /secrets cat /secrets/aws-credentials.ini > new-aws-credentials.ini printf "\n" >> new-aws-credentials.ini - cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + cat /secrets-rubintv/aws_credentials.ini >> new-aws-credentials.ini printf "\n" >> new-aws-credentials.ini mv new-aws-credentials.ini /secrets/aws-credentials.ini chown 73006:73006 /secrets/* From 8d6b15db64d27c28a9e274994f3e9088568ef453 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Date: Wed, 2 Oct 2024 09:07:29 -0300 Subject: [PATCH 184/193] exposurelog: update ComCam nfs mounts --- 
applications/exposurelog/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/exposurelog/values-summit.yaml b/applications/exposurelog/values-summit.yaml index dac0d8412a..61ba1cf848 100644 --- a/applications/exposurelog/values-summit.yaml +++ b/applications/exposurelog/values-summit.yaml @@ -1,7 +1,7 @@ config: site_id: summit - nfs_path_1: /repo/LSSTComCam # Mounted as /volume_1 - nfs_server_1: comcam-archiver.cp.lsst.org + nfs_path_1: /comcam/repo/LSSTComCam # Mounted as /volume_1 + nfs_server_1: nfs3.cp.lsst.org butler_uri_1: /volume_1 nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2 From b77bd8f6017c9f5ac3db99e5bcd01a7892c5fed4 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Date: Wed, 2 Oct 2024 10:55:40 -0300 Subject: [PATCH 185/193] rubintv: add application to TTS --- .../rubintv/values-tucson-teststand.yaml | 42 +++++++++++++++++++ environments/values-tucson-teststand.yaml | 1 + 2 files changed, 43 insertions(+) create mode 100644 applications/rubintv/values-tucson-teststand.yaml diff --git a/applications/rubintv/values-tucson-teststand.yaml b/applications/rubintv/values-tucson-teststand.yaml new file mode 100644 index 0000000000..64526e159d --- /dev/null +++ b/applications/rubintv/values-tucson-teststand.yaml @@ -0,0 +1,42 @@ +rubintv: + siteTag: "tucson" + separateSecrets: true + + imagePullSecrets: + - name: pull-secret + + frontend: + debug: true + env: + - name: S3_ENDPOINT_URL + value: &s3E "https://s3.rubintv.tu.lsst.org" + - name: RAPID_ANALYSIS_LOCATION + value: "TTS" + image: + tag: deploy + pullPolicy: Always + + workers: + replicas: 1 + image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0037 + pullPolicy: Always + uid: 73006 + gid: 73006 + scriptsLocation: /repos/rubintv_analysis_service/scripts + script: rubintv_worker.py -a rubintv-dev -p 8080 -c /repos/rubintv_analysis_service/scripts/config-temporal.yaml + env: + - name: S3_ENDPOINT_URL + value: *s3E + - name: DAF_BUTLER_REPOSITORY_INDEX + value: "s3://rubin-summit-users/data-repos.yaml" + - name: DAF_BUTLER_REPOSITORY + value: "/sdf/group/rubin/repo/ir2/butler.yaml" + resources: + limits: + cpu: 2.0 + memory: "8Gi" + +global: + tsVaultSecretsPath: "" diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index ae6ec29038..3e47b7fcc5 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -19,6 +19,7 @@ applications: nublado: true obsenv-management: true portal: true + rubintv: true sasquatch: true squareone: true strimzi: true From a6ed075db6c5ed8c95e11b96fad132b3572e2cdb Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Date: Wed, 2 Oct 2024 11:28:37 -0300 Subject: [PATCH 186/193] rapid-analysis: add application to TTS --- .../values-tucson-teststand.yaml | 121 ++++++++++++++++++ environments/values-tucson-teststand.yaml | 1 + 2 files changed, 122 insertions(+) create mode 100644 applications/rapid-analysis/values-tucson-teststand.yaml diff --git a/applications/rapid-analysis/values-tucson-teststand.yaml b/applications/rapid-analysis/values-tucson-teststand.yaml new file mode 100644 index 0000000000..8604e12165 --- /dev/null +++ b/applications/rapid-analysis/values-tucson-teststand.yaml @@ -0,0 +1,121 @@ +image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0039 + pullPolicy: Always +env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + DEPLOY_BRANCH: deploy-tts +siteTag: tts +location: TTS +scripts: +- name: 
summit/auxTel/runBackgroundService.py +- name: summit/auxTel/runButlerWatcher.py +- name: summit/auxTel/runCalibrateCcdRunner.py +- name: summit/auxTel/runImExaminer.py +- name: summit/auxTel/runIsrRunner.py +- name: summit/auxTel/runMetadataCreator.py +- name: summit/auxTel/runMetadataServer.py +- name: summit/auxTel/runMonitor.py +- name: summit/auxTel/runMountTorquePlotter.py +- name: summit/auxTel/runNightReporter.py +- name: summit/auxTel/runSpecExaminer.py +- name: summit/comCam/runButlerWatcher.py +- name: summit/comCam/runIsrRunner_000.py +- name: summit/comCam/runIsrRunner_001.py +- name: summit/comCam/runIsrRunner_002.py +- name: summit/comCam/runIsrRunner_003.py +- name: summit/comCam/runIsrRunner_004.py +- name: summit/comCam/runIsrRunner_005.py +- name: summit/comCam/runIsrRunner_006.py +- name: summit/comCam/runIsrRunner_007.py +- name: summit/comCam/runIsrRunner_008.py +- name: summit/comCam/runMetadataServer.py +- name: summit/comCam/runPlotter.py + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 6G +- name: summit/misc/runTmaTelemetry.py +# TODO: remove google credentials +credentialFile: google_write_creds +vaultPrefixPath: secret/k8s_operator/tucson-teststand.lsst.codes +pullSecretsPath: pull-secret +rubinTvSecretsPath: rubintv +# TODO: remove google credentials +credentialSecretsPath: rubintv +butlerSecret: + key: butler-secret + containerPath: /home/saluser/.lsst + dbUser: oods +imagePullSecrets: +- name: pull-secret +nfsMountpoint: +- name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS +- name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam +- name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata +- name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata +- name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project +- name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel +- name: comcam-gen3-data-temp + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam +- name: scratch-shared + containerPath: /scratch + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/rubintv +resources: + requests: + cpu: 0.5 + memory: 1G + limits: + cpu: 1.0 + memory: 2.5G +redis: + enabled: true + port: 6379 + env: + MASTER: true + envSecrets: + - name: REDIS_PASSWORD + secretName: rubintv-secrets + secretKey: redis-password + storage: + classname: rook-ceph-block + request: 10Gi + resources: + requests: + cpu: 100m + memory: 1Gi + limits: + cpu: 1 + memory: 50Gi diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 3e47b7fcc5..6554e28ab3 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -19,6 +19,7 @@ applications: nublado: true obsenv-management: true portal: true + rapid-analysis: true rubintv: true sasquatch: true squareone: true From 35ba2224aac75ddbf2fdcba64446deb669fd2dcf Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 1 Oct 2024 
16:49:31 -0700 Subject: [PATCH 187/193] Merge eas and auxtel connectors back - They were split in DM-46203 to alleviate the race condition found in the Telegraf Avro parsers. This is fixed in the new image. --- applications/sasquatch/values-usdfprod.yaml | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index d3adca0d93..9e02f4ea5e 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -167,14 +167,7 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ESS" ] - debug: true - eas2: - enabled: true - database: "efd" - timestamp_field: "private_efdStamp" - topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.ESS", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] debug: true m1m3: enabled: true @@ -251,14 +244,7 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory" ] - debug: true - auxtel2: - enabled: true - database: "efd" - timestamp_field: "private_efdStamp" - topicRegexps: | - [ "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] debug: true latiss: enabled: true From 1edc4677d27c0c2bf96bd7618539e42bb12edbe3 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 2 Oct 2024 12:45:52 -0700 Subject: [PATCH 188/193] gmegias usdf-rsp-dev argocd --- applications/argocd/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index a58a82f77e..effe6126c4 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -59,6 +59,7 @@ argo-cd: g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer g, vbecker@slac.stanford.edu, role:developer + g, gmegias@slac.stanford.edu, role:developer scopes: "[email]" server: From b17071ac599a11db4ab017b78936119c803032e7 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 2 Oct 2024 13:01:29 -0700 Subject: [PATCH 189/193] salnikov usdf-rsp-dev argocd --- applications/argocd/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index effe6126c4..3a05ea872d 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -60,6 +60,7 @@ argo-cd: g, bbrond@slac.stanford.edu, role:developer g, vbecker@slac.stanford.edu, role:developer g, gmegias@slac.stanford.edu, role:developer + g, salnikov@slac.stanford.edu, role:developer scopes: "[email]" server: From 8085525817e017819ae8797904eab23020f6580e Mon Sep 17 00:00:00 2001 From: Jeremy McCormick Date: Thu, 26 Sep 2024 12:36:59 -0500 Subject: [PATCH 190/193] Add initial version of ppdb-replication application Add the initial version of an application for replicating data between the APDB and the PPDB. 
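
As with the other Phalanx applications touched in this series, the new chart is switched on per environment and tuned through a values-<environment>.yaml override. The snippet below is an illustrative sketch only: the `applications` flag mirrors the enablement pattern used elsewhere in this repository, and the `config` keys come from the chart's values.yaml/README added in this patch, but the paths and values shown are placeholders rather than the actual usdfdev settings (the values-usdfdev.yaml added by this patch is authoritative).

```yaml
# environments/values-usdfdev.yaml -- enable the application (sketch)
applications:
  ppdb-replication: true

# applications/ppdb-replication/values-usdfdev.yaml -- hypothetical excerpt;
# placeholder values, not the real deployment configuration
config:
  logLevel: "INFO"
  apdbConfig: "/path/to/apdb-config.yaml"   # placeholder APDB config file resource
  ppdbConfig: "/path/to/ppdb-config.yaml"   # placeholder PPDB config file resource
```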
--- applications/ppdb-replication/.helmignore | 23 ++++ applications/ppdb-replication/Chart.yaml | 8 ++ applications/ppdb-replication/README.md | 44 +++++++ applications/ppdb-replication/secrets.yaml | 9 ++ .../ppdb-replication/templates/_helpers.tpl | 44 +++++++ .../ppdb-replication/templates/configmap.yaml | 19 +++ .../templates/deployment.yaml | 89 +++++++++++++ .../ppdb-replication/templates/ingress.yaml | 30 +++++ .../templates/networkpolicy.yaml | 21 ++++ .../ppdb-replication/templates/pvc.yaml | 18 +++ .../ppdb-replication/templates/service.yaml | 15 +++ .../templates/vault-secrets.yaml | 9 ++ .../ppdb-replication/values-usdfdev.yaml | 44 +++++++ applications/ppdb-replication/values.yaml | 118 ++++++++++++++++++ docs/applications/ppdb-replication/index.rst | 19 +++ docs/applications/ppdb-replication/values.md | 12 ++ docs/applications/rsp.rst | 1 + docs/applications/rubin.rst | 1 + environments/README.md | 1 + .../applications/rubin/ppdb-replication.yaml | 34 +++++ environments/values-usdfdev.yaml | 1 + environments/values.yaml | 3 + 22 files changed, 563 insertions(+) create mode 100644 applications/ppdb-replication/.helmignore create mode 100644 applications/ppdb-replication/Chart.yaml create mode 100644 applications/ppdb-replication/README.md create mode 100644 applications/ppdb-replication/secrets.yaml create mode 100644 applications/ppdb-replication/templates/_helpers.tpl create mode 100644 applications/ppdb-replication/templates/configmap.yaml create mode 100644 applications/ppdb-replication/templates/deployment.yaml create mode 100644 applications/ppdb-replication/templates/ingress.yaml create mode 100644 applications/ppdb-replication/templates/networkpolicy.yaml create mode 100644 applications/ppdb-replication/templates/pvc.yaml create mode 100644 applications/ppdb-replication/templates/service.yaml create mode 100644 applications/ppdb-replication/templates/vault-secrets.yaml create mode 100644 applications/ppdb-replication/values-usdfdev.yaml create mode 100644 applications/ppdb-replication/values.yaml create mode 100644 docs/applications/ppdb-replication/index.rst create mode 100644 docs/applications/ppdb-replication/values.md create mode 100644 environments/templates/applications/rubin/ppdb-replication.yaml diff --git a/applications/ppdb-replication/.helmignore b/applications/ppdb-replication/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/ppdb-replication/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/ppdb-replication/Chart.yaml b/applications/ppdb-replication/Chart.yaml new file mode 100644 index 0000000000..1dd8dce332 --- /dev/null +++ b/applications/ppdb-replication/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Replicates data from the APDB to the PPDB +name: ppdb-replication +sources: +- https://github.com/lsst/dax_ppdb.git +type: application +version: 1.0.0 diff --git a/applications/ppdb-replication/README.md b/applications/ppdb-replication/README.md new file mode 100644 index 0000000000..5598cba6d9 --- /dev/null +++ b/applications/ppdb-replication/README.md @@ -0,0 +1,44 @@ +# ppdb-replication + +Replicates data from the APDB to the PPDB + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the ppdb-replication deployment pod | +| config.additionalS3ProfileName | string | `nil` | Additional S3 profile name | +| config.additionalS3ProfileUrl | string | `nil` | Additional S3 profile URL | +| config.apdbConfig | string | `nil` | APDB config file resource | +| config.apdbIndexUri | string | `nil` | APDB index URI | +| config.checkInterval | string | `nil` | Time to wait before checking for new chunks, if no chunk appears | +| config.disableBucketValidation | int | `1` | Disable bucket validation in LSST S3 tools | +| config.logLevel | string | `"INFO"` | Logging level | +| config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | +| config.maxWaitTime | string | `nil` | Maximum time to wait before replicating a chunk after next chunk appears | +| config.minWaitTime | string | `nil` | Minimum time to wait before replicating a chunk after next chunk appears | +| config.monLogger | string | `"lsst.dax.ppdb.monitor"` | Name of logger for monitoring | +| config.monRules | string | `nil` | Comma-separated list of monitoring filter rules | +| config.pathPrefix | string | `"/ppdb-replication"` | URL path prefix | +| config.persistentVolumeClaims | list | `[]` | Persistent volume claims | +| config.ppdbConfig | string | `nil` | PPDB config file resource | +| config.s3EndpointUrl | string | `nil` | S3 endpoint URL | +| config.updateExisting | bool | `false` | Allow updates to already replicated data | +| config.volumeMounts | list | `[]` | Volume mounts | +| config.volumes | list | `[]` | Volumes specific to the environment | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"Always"` | Pull policy for the ppdb-replication image | +| image.repository | string | `"ghcr.io/lsst/ppdb-replication"` | Image to use in the ppdb-replication deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the ppdb-replication deployment pod | +| podAnnotations | object | `{}` | Annotations for the ppdb-replication deployment pod | +| replicaCount | int | `1` | Number of deployment pods to start | +| 
resources | object | see `values.yaml` | Resource limits and requests for the ppdb-replication deployment pod | +| tolerations | list | `[]` | Tolerations for the ppdb-replication deployment pod | diff --git a/applications/ppdb-replication/secrets.yaml b/applications/ppdb-replication/secrets.yaml new file mode 100644 index 0000000000..92474ab2c3 --- /dev/null +++ b/applications/ppdb-replication/secrets.yaml @@ -0,0 +1,9 @@ +"aws-credentials.ini": + description: >- + AWS credentials required for acessing configuration files in S3. +"db-auth.yaml": + description: >- + Cassandra database credentials for the APDB. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the PPDB database. diff --git a/applications/ppdb-replication/templates/_helpers.tpl b/applications/ppdb-replication/templates/_helpers.tpl new file mode 100644 index 0000000000..47bdc59cfe --- /dev/null +++ b/applications/ppdb-replication/templates/_helpers.tpl @@ -0,0 +1,44 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ppdb-replication.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ppdb-replication.labels" -}} +helm.sh/chart: {{ include "ppdb-replication.chart" . }} +{{ include "ppdb-replication.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ppdb-replication.selectorLabels" -}} +app.kubernetes.io/name: "ppdb-replication" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ppdb-replication.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/ppdb-replication/templates/configmap.yaml b/applications/ppdb-replication/templates/configmap.yaml new file mode 100644 index 0000000000..a66bacce2c --- /dev/null +++ b/applications/ppdb-replication/templates/configmap.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . 
| nindent 4 }} +data: + DAX_APDB_INDEX_URI: {{ .Values.config.apdbIndexUri | quote }} + PPDB_REPLICATION_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + PPDB_REPLICATION_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} + PPDB_REPLICATION_PROFILE: {{ .Values.config.logProfile | quote }} + PPDB_REPLICATION_APDB_CONFIG: {{ .Values.config.apdbConfig | quote }} + PPDB_REPLICATION_PPDB_CONFIG: {{ .Values.config.ppdbConfig | quote }} + PPDB_REPLICATION_MON_LOGGER: {{ .Values.config.monLogger | quote }} + PPDB_REPLICATION_MON_RULES: {{ .Values.config.monRules | quote }} + PPDB_REPLICATION_UPDATE_EXISTING: {{ .Values.config.updateExisting | quote}} + PPDB_REPLICATION_MIN_WAIT_TIME: {{ .Values.config.minWaitTime | quote }} + PPDB_REPLICATION_MAX_WAIT_TIME: {{ .Values.config.maxWaitTime | quote }} + PPDB_REPLICATION_CHECK_INTERVAL: {{ .Values.config.checkInterval | quote}} diff --git a/applications/ppdb-replication/templates/deployment.yaml b/applications/ppdb-replication/templates/deployment.yaml new file mode 100644 index 0000000000..454ec56b56 --- /dev/null +++ b/applications/ppdb-replication/templates/deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ppdb-replication.fullname" . }} + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ppdb-replication.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "ppdb-replication.selectorLabels" . | nindent 8 }} + annotations: + # Force the pod to restart when the config maps are updated. + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + volumes: + - name: "ppdb-replication-secrets-raw" + secret: + secretName: {{ include "ppdb-replication.fullname" . }} + - name: "ppdb-replication-secrets" + emptyDir: + sizeLimit: "100Mi" + {{- with .Values.config.volumes }} + {{- . 
| toYaml | nindent 8 }} + {{- end }} + initContainers: + - name: fix-secret-permissions + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/sh" + - "-c" + - | + cp -RL /tmp/ppdb-replication-secrets-raw/* /app/secrets/ + chmod 0400 /app/secrets/* + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: "ppdb-replication-secrets" + mountPath: "/app/secrets" + - name: "ppdb-replication-secrets-raw" + mountPath: "/tmp/ppdb-replication-secrets-raw" + readOnly: true + containers: + - name: {{ .Chart.Name }} + envFrom: + - configMapRef: + name: "ppdb-replication" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/app/secrets/aws-credentials.ini" + - name: PGPASSFILE + value: "/app/secrets/postgres-credentials.txt" + - name: LSST_DB_AUTH + value: "/app/secrets/db-auth.yaml" + - name: S3_ENDPOINT_URL + value: {{ .Values.config.s3EndpointUrl | quote }} + - name: LSST_RESOURCES_S3_PROFILE_{{ .Values.config.additionalS3ProfileName }} + value: {{ .Values.config.additionalS3ProfileUrl | quote }} + - name: LSST_DISABLE_BUCKET_VALIDATION + value: {{ .Values.config.disableBucketValidation | quote }} + volumeMounts: + - name: "ppdb-replication-secrets" + mountPath: "/app/secrets" + readOnly: true + {{- with .Values.config.volumeMounts }} + {{- . | toYaml | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/ppdb-replication/templates/ingress.yaml b/applications/ppdb-replication/templates/ingress.yaml new file mode 100644 index 0000000000..381bce084c --- /dev/null +++ b/applications/ppdb-replication/templates/ingress.yaml @@ -0,0 +1,30 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" +template: + metadata: + name: "ppdb-replication" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.config.pathPrefix | quote }} + pathType: "Prefix" + backend: + service: + name: "ppdb-replication" + port: + number: 8080 diff --git a/applications/ppdb-replication/templates/networkpolicy.yaml b/applications/ppdb-replication/templates/networkpolicy.yaml new file mode 100644 index 0000000000..10ddf62820 --- /dev/null +++ b/applications/ppdb-replication/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "ppdb-replication" +spec: + podSelector: + matchLabels: + {{- include "ppdb-replication.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. 
+ - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/ppdb-replication/templates/pvc.yaml b/applications/ppdb-replication/templates/pvc.yaml new file mode 100644 index 0000000000..52af2db47b --- /dev/null +++ b/applications/ppdb-replication/templates/pvc.yaml @@ -0,0 +1,18 @@ +{{- if .Values.config.persistentVolumeClaims }} +{{- $top := . -}} +{{- range $index, $pvc := .Values.config.persistentVolumeClaims }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ $pvc.name }}" +spec: + storageClassName: "{{ $pvc.storageClassName }}" + accessModes: + - ReadOnlyMany + resources: + requests: + storage: 100Mi +{{- end }} +{{- end }} + diff --git a/applications/ppdb-replication/templates/service.yaml b/applications/ppdb-replication/templates/service.yaml new file mode 100644 index 0000000000..27b726bc7b --- /dev/null +++ b/applications/ppdb-replication/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "ppdb-replication.selectorLabels" . | nindent 4 }} diff --git a/applications/ppdb-replication/templates/vault-secrets.yaml b/applications/ppdb-replication/templates/vault-secrets.yaml new file mode 100644 index 0000000000..96c228968f --- /dev/null +++ b/applications/ppdb-replication/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "ppdb-replication.fullname" . }} + labels: + {{- include "ppdb-replication.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/ppdb-replication" + type: Opaque diff --git a/applications/ppdb-replication/values-usdfdev.yaml b/applications/ppdb-replication/values-usdfdev.yaml new file mode 100644 index 0000000000..b373b91d38 --- /dev/null +++ b/applications/ppdb-replication/values-usdfdev.yaml @@ -0,0 +1,44 @@ +config: + + # -- Logging level + logLevel: "INFO" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "development" + + # -- APDB config file resource + apdbConfig: "label:pp-prod:lsstcomcamsim-or4" + + # -- PPDB config file resource + ppdbConfig: "/sdf/group/rubin/user/jeremym/ppdb-replication/config/ppdb-replication-test-1.yaml" + + # -- APDB index URI + apdbIndexUri: "/sdf/group/rubin/shared/apdb_config/apdb-index.yaml" + + # -- S3 endpoint URL + s3EndpointUrl: https://s3dfrgw.slac.stanford.edu + + # -- S3 profile name for additional S3 profile + additionalS3ProfileName: "embargo" + + # -- S3 profile URL for additional S3 profile + additionalS3ProfileUrl: "https://sdfembs3.sdf.slac.stanford.edu" + + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volumeMounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin diff --git a/applications/ppdb-replication/values.yaml b/applications/ppdb-replication/values.yaml new file mode 100644 index 0000000000..fec71e1776 --- /dev/null +++ b/applications/ppdb-replication/values.yaml @@ -0,0 +1,118 @@ +# Default values for ppdb-replication. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the ppdb-replication deployment + repository: "ghcr.io/lsst/ppdb-replication" + + # -- Pull policy for the ppdb-replication image + pullPolicy: "Always" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: "main" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the ppdb-replication deployment pod +affinity: {} + +# -- Node selection rules for the ppdb-replication deployment pod +nodeSelector: {} + +# -- Annotations for the ppdb-replication deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the ppdb-replication deployment pod +# @default -- see `values.yaml` +resources: + limits: + cpu: "1" + memory: "16.0Gi" + requests: + cpu: "200m" # 20% of a single core + memory: "4.0Gi" + +# -- Tolerations for the ppdb-replication deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null + +# Application-specific configuration +config: + # -- Logging level + logLevel: "INFO" + + # -- Name of logger for monitoring + monLogger: "lsst.dax.ppdb.monitor" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "production" + + # -- URL path prefix + pathPrefix: "/ppdb-replication" + + # -- APDB config file resource + apdbConfig: null + + # -- PPDB config file resource + ppdbConfig: null + + # -- APDB index URI + apdbIndexUri: null + + # -- Comma-separated list of monitoring filter rules + monRules: null + + # -- Allow updates to already replicated data + updateExisting: false + + # -- Minimum time to wait before replicating a chunk after next chunk appears + minWaitTime: null + + # -- Maximum time to wait before replicating a chunk after next chunk appears + maxWaitTime: null + + # -- Time to wait before checking for new chunks, if no chunk appears + checkInterval: null + + # -- S3 endpoint URL + s3EndpointUrl: null + + # -- Additional S3 profile name + additionalS3ProfileName: null + + # -- Additional S3 profile URL + additionalS3ProfileUrl: null + + # -- Disable bucket validation in LSST S3 tools + disableBucketValidation: 1 + + # -- Volumes specific to the environment + volumes: [] + + # -- Volume mounts + volumeMounts: [] + + # -- Persistent volume claims + persistentVolumeClaims: [] diff --git a/docs/applications/ppdb-replication/index.rst b/docs/applications/ppdb-replication/index.rst new file mode 100644 index 0000000000..ea26aae83a --- /dev/null +++ b/docs/applications/ppdb-replication/index.rst @@ -0,0 +1,19 @@ +.. px-app:: ppdb-replication + +############################################################ +ppdb-replication — Replicates data from the APDB to the PPDB +############################################################ + +The ``ppdb-replication`` application periodically replicates data from the +Alert Production DataBase (APDB) to the Prompt Products DataBase (PPDB). + +.. jinja:: ppdb-replication + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/ppdb-replication/values.md b/docs/applications/ppdb-replication/values.md new file mode 100644 index 0000000000..425e7f6fd2 --- /dev/null +++ b/docs/applications/ppdb-replication/values.md @@ -0,0 +1,12 @@ +```{px-app-values} ppdb-replication +``` + +# ppdb-replication Helm values reference + +Helm values reference table for the {px-app}`ppdb-replication` application. 
+ +```{include} ../../../applications/ppdb-replication/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/rsp.rst b/docs/applications/rsp.rst index 9c631d37af..b395276738 100644 --- a/docs/applications/rsp.rst +++ b/docs/applications/rsp.rst @@ -18,6 +18,7 @@ Argo CD project: ``rsp`` noteburst/index nublado/index portal/index + ppdb-replication/index semaphore/index siav2/index sqlproxy-cross-project/index diff --git a/docs/applications/rubin.rst b/docs/applications/rubin.rst index a03aad2b38..483f483438 100644 --- a/docs/applications/rubin.rst +++ b/docs/applications/rubin.rst @@ -18,6 +18,7 @@ Argo CD project: ``rubin`` nightreport/index obsloctap/index plot-navigator/index + ppdb-replication/index production-tools/index rapid-analysis/index rubintv/index diff --git a/environments/README.md b/environments/README.md index 9ec20b4fda..d44f6c7e2d 100644 --- a/environments/README.md +++ b/environments/README.md @@ -43,6 +43,7 @@ | applications.plot-navigator | bool | `false` | Enable the plot-navigator application | | applications.portal | bool | `false` | Enable the portal application | | applications.postgres | bool | `false` | Enable the in-cluster PostgreSQL server. Use of this server is discouraged in favor of using infrastructure SQL, but will remain supported for use cases such as minikube test deployments. | +| applications.ppdb-replication | bool | `false` | Enable the ppdb-replication application | | applications.production-tools | bool | `false` | Enable the production-tools application | | applications.prompt-proto-service-hsc | bool | `false` | Enable the prompt-proto-service-hsc application | | applications.prompt-proto-service-hsc-gpu | bool | `false` | Enable the prompt-proto-service-hsc-gpu application | diff --git a/environments/templates/applications/rubin/ppdb-replication.yaml b/environments/templates/applications/rubin/ppdb-replication.yaml new file mode 100644 index 0000000000..e9685feb11 --- /dev/null +++ b/environments/templates/applications/rubin/ppdb-replication.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "ppdb-replication") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "ppdb-replication" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "ppdb-replication" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "ppdb-replication" + server: "https://kubernetes.default.svc" + project: "rubin" + source: + path: "applications/ppdb-replication" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 9eb8c8c0a7..d186cba8d2 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -27,6 +27,7 @@ applications: plot-navigator: true portal: true postgres: true + ppdb-replication: true rubintv: true sasquatch: true schedview-snapshot: true diff --git a/environments/values.yaml b/environments/values.yaml index b7774a1d1e..cd11a31959 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -153,6 +153,9 @@ 
applications: # supported for use cases such as minikube test deployments. postgres: false + # -- Enable the ppdb-replication application + ppdb-replication: false + # -- Enable the rubintv application rubintv: false From 4a75b88b0075b3cff6cf10aa454f06ddfe506a55 Mon Sep 17 00:00:00 2001 From: Dan Fuchs Date: Mon, 30 Sep 2024 10:54:27 -0500 Subject: [PATCH 191/193] DM-45522 sasquatch: App metrics events in Sasquatch Sasquatch subchart that creates and configures: * A new telegraf consumer for all app metrics * Per-app Avro tags config for that consumer * KafkaUser and KafkaTopic resources per-app See https://github.com/lsst-sqre/sasquatch/pull/41 for how to use this new subchart. review feedback More review feedback --- applications/sasquatch/Chart.yaml | 3 + applications/sasquatch/README.md | 23 +++- .../sasquatch/charts/app-metrics/Chart.yaml | 6 ++ .../sasquatch/charts/app-metrics/README.md | 28 +++++ .../charts/app-metrics/templates/_helpers.tpl | 10 ++ .../app-metrics/templates/kafka-topics.yaml | 15 +++ .../app-metrics/templates/kafka-users.yaml | 31 ++++++ .../templates/telegraf-configmap.yaml | 68 ++++++++++++ .../templates/telegraf-deployment.yaml | 78 ++++++++++++++ .../sasquatch/charts/app-metrics/values.yaml | 102 ++++++++++++++++++ .../sasquatch/charts/strimzi-kafka/README.md | 1 - .../charts/strimzi-kafka/templates/users.yaml | 38 ------- .../charts/strimzi-kafka/values.yaml | 4 - applications/sasquatch/secrets.yaml | 4 - applications/sasquatch/values-idfdev.yaml | 8 -- applications/sasquatch/values.yaml | 7 ++ 16 files changed, 370 insertions(+), 56 deletions(-) create mode 100644 applications/sasquatch/charts/app-metrics/Chart.yaml create mode 100644 applications/sasquatch/charts/app-metrics/README.md create mode 100644 applications/sasquatch/charts/app-metrics/templates/_helpers.tpl create mode 100644 applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml create mode 100644 applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml create mode 100644 applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml create mode 100644 applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml create mode 100644 applications/sasquatch/charts/app-metrics/values.yaml diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index 600032104e..723f35d9f7 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -56,6 +56,9 @@ dependencies: - name: square-events condition: squareEvents.enabled version: 1.0.0 + - name: app-metrics + condition: app-metrics.enabled + version: 1.0.0 annotations: phalanx.lsst.io/docs: | diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index aac4c033b9..e922633932 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -18,6 +18,8 @@ Rubin Observatory's telemetry service | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| app-metrics.apps | list | `[]` | The apps to create configuration for. 
| +| app-metrics.enabled | bool | `false` | Enable the app-metrics subchart with topic, user, and telegraf configurations | | chronograf.enabled | bool | `true` | Whether Chronograf is enabled | | chronograf.env | object | See `values.yaml` | Additional environment variables for Chronograf | | chronograf.envFromSecret | string | `"sasquatch"` | Name of secret to use. The keys `generic_client_id`, `generic_client_secret`, and `token_secret` should be set. | @@ -81,6 +83,26 @@ Rubin Observatory's telemetry service | strimzi-registry-operator.clusterNamespace | string | `"sasquatch"` | Namespace where the Strimzi Kafka cluster is deployed | | strimzi-registry-operator.operatorNamespace | string | `"sasquatch"` | Namespace where the strimzi-registry-operator is deployed | | telegraf-kafka-consumer | object | `{}` | Overrides for telegraf-kafka-consumer configuration | +| app-metrics.affinity | object | `{}` | Affinity for pod assignment | +| app-metrics.apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| app-metrics.args | list | `[]` | Arguments passed to the Telegraf agent containers | +| app-metrics.cluster.name | string | `"sasquatch"` | | +| app-metrics.debug | bool | false | Run Telegraf in debug mode. | +| app-metrics.env | list | See `values.yaml` | Telegraf agent enviroment variables | +| app-metrics.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| app-metrics.globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| app-metrics.globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | +| app-metrics.image.pullPolicy | string | `"Always"` | Image pull policy | +| app-metrics.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| app-metrics.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| app-metrics.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| app-metrics.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| app-metrics.nodeSelector | object | `{}` | Node labels for pod assignment | +| app-metrics.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| app-metrics.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| app-metrics.replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | +| app-metrics.resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| app-metrics.tolerations | list | `[]` | Tolerations for pod assignment | | influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | Enable authentication of the data nodes using this secret, by creating a username and password for an admin account. The secret must contain keys `username` and `password`. 
| | influxdb-enterprise.bootstrap.ddldml.configMap | string | Do not run DDL or DML | A config map containing DDL and DML that define databases, retention policies, and inject some data. The keys `ddl` and `dml` must exist, even if one of them is empty. DDL is executed before DML to ensure databases and retention policies exist. | | influxdb-enterprise.bootstrap.ddldml.resources | object | `{}` | Kubernetes resources and limits for the bootstrap job | @@ -389,7 +411,6 @@ Rubin Observatory's telemetry service | strimzi-kafka.registry.resources | object | See `values.yaml` | Kubernetes requests and limits for the Schema Registry | | strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| strimzi-kafka.users.appmetrics.enabled | bool | `false` | Enable user appmetrics | | strimzi-kafka.users.camera.enabled | bool | `false` | Enable user camera, used at the camera environments | | strimzi-kafka.users.consdb.enabled | bool | `false` | Enable user consdb | | strimzi-kafka.users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). | diff --git a/applications/sasquatch/charts/app-metrics/Chart.yaml b/applications/sasquatch/charts/app-metrics/Chart.yaml new file mode 100644 index 0000000000..1152b5b2ca --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: app-metrics +version: 1.0.0 +appVersion: "1.0.0" +description: Kafka topics, users, and a telegraf connector for metrics events. +type: application diff --git a/applications/sasquatch/charts/app-metrics/README.md b/applications/sasquatch/charts/app-metrics/README.md new file mode 100644 index 0000000000..1cb6c56b6d --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/README.md @@ -0,0 +1,28 @@ +# app-metrics + +Kafka topics, users, and a telegraf connector for metrics events. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment | +| apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| args | list | `[]` | Arguments passed to the Telegraf agent containers | +| cluster.name | string | `"sasquatch"` | | +| debug | bool | false | Run Telegraf in debug mode. | +| env | list | See `values.yaml` | Telegraf agent enviroment variables | +| envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). 
These will be concatenated with the `influxTags` from `globalAppConfig` | +| image.pullPolicy | string | `"Always"` | Image pull policy | +| image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| nodeSelector | object | `{}` | Node labels for pod assignment | +| podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | +| resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| tolerations | list | `[]` | Tolerations for pod assignment | diff --git a/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl b/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl new file mode 100644 index 0000000000..f88a9ae075 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl @@ -0,0 +1,10 @@ +{{/* +Convert a list to a TOML array of quoted string values +*/}} +{{- define "helpers.toTomlArray" -}} +{{- $items := list -}} +{{- range . -}} +{{- $items = (quote .) | append $items -}} +{{- end -}} +[ {{ join ", " $items }} ] +{{- end -}} diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml new file mode 100644 index 0000000000..70db2590de --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml @@ -0,0 +1,15 @@ +{{- range .Values.apps }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: "lsst.square.app-metrics.events.{{ . }}" + labels: + strimzi.io/cluster: {{ $.Values.cluster.name }} +spec: + partitions: 10 + replicas: 3 + config: + # http://kafka.apache.org/documentation/#topicconfigs + retention.ms: 86400000 # 1 day +{{- end }} diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml new file mode 100644 index 0000000000..9ddab60b5e --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml @@ -0,0 +1,31 @@ +{{- range .Values.apps }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: app-metrics-{{ . }} + labels: + strimzi.io/cluster: {{ $.Values.cluster.name }} +spec: + authentication: + type: tls + authorization: + type: simple + acls: + - resource: + type: group + name: app-metrics-events + patternType: prefix + operations: + - "Read" + host: "*" + - resource: + type: topic + name: "lsst.square.app-metrics.events.{{ . 
}}" + patternType: literal + operations: + - "Describe" + - "Read" + - "Write" + host: "*" +{{- end }} diff --git a/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml new file mode 100644 index 0000000000..e8a60a4ae3 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sasquatch-telegraf-app-metrics + labels: + app.kubernetes.io/name: sasquatch-telegraf-app-metrics + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + app.kubernetes.io/part-of: sasquatch +data: + telegraf.conf: |+ + [agent] + metric_batch_size = 5000 + metric_buffer_limit = 100000 + collection_jitter = "0s" + flush_interval = "10s" + flush_jitter = "0s" + debug = {{ default false .Values.debug }} + omit_hostname = true + + [[outputs.influxdb]] + urls = [ + {{ .Values.influxdb.url | quote }} + ] + database = "telegraf-kafka-app-metrics-consumer" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[outputs.influxdb]] + namepass = ["telegraf_*"] + urls = [ + {{ .Values.influxdb.url | quote }} + ] + database = "telegraf" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + {{- range $index, $app := .Values.apps }} + {{- $globalInfluxTags := $.Values.globalInfluxTags | default list }} + {{- $appInfluxTags := (index $.Values.globalAppConfig $app "influxTags") | default list }} + {{- $influxTags := concat $globalInfluxTags $appInfluxTags }} + [[inputs.kafka_consumer]] + brokers = [ + "sasquatch-kafka-brokers.sasquatch:9092" + ] + consumer_group = "telegraf-kafka-consumer-app-metrics" + sasl_mechanism = "SCRAM-SHA-512" + sasl_password = "$TELEGRAF_PASSWORD" + sasl_username = "telegraf" + data_format = "avro" + avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" + avro_timestamp = "timestamp_ns" + avro_timestamp_format = "unix_ns" + avro_union_mode = "nullable" + avro_tags = {{ include "helpers.toTomlArray" $influxTags }} + topics = [ + "lsst.square.app-metrics.events.{{ $app }}", + ] + max_processing_time = "5s" + consumer_fetch_default = "5MB" + max_undelivered_messages = 10000 + compression_codec = 3 + {{- end }} + + [[inputs.internal]] + name_prefix = "telegraf_" + collect_memstats = true + tags = { instance = "app-metrics" } diff --git a/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml b/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml new file mode 100644 index 0000000000..9a0c3dd017 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sasquatch-telegraf-app-metrics + labels: + app.kubernetes.io/name: sasquatch-telegraf-app-metrics + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + app.kubernetes.io/part-of: sasquatch +spec: + replicas: {{ default 1 .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + template: + metadata: + labels: + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + annotations: + checksum/config: {{ include (print $.Template.BasePath "/telegraf-configmap.yaml") $ | sha256sum }} + {{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + 
containers: + - name: telegraf + securityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- end }} + {{- if .Values.args }} + args: + {{- toYaml .Values.args | nindent 8 }} + {{- end }} + {{- if .Values.env }} + env: + {{- toYaml .Values.env | nindent 8 }} + {{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/telegraf + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: sasquatch-telegraf-app-metrics diff --git a/applications/sasquatch/charts/app-metrics/values.yaml b/applications/sasquatch/charts/app-metrics/values.yaml new file mode 100644 index 0000000000..d5bc17418f --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/values.yaml @@ -0,0 +1,102 @@ +## Default values.yaml for the Metrics Events subchart. + +# -- app-metrics configuration in any environment in which the subchart is +# enabled. This should stay globally specified here, and it shouldn't be +# overridden. +# See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) +# for the structure of this value. +globalAppConfig: {} + +# -- A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. +# The names should be the same as the app names in Phalanx. +apps: [] + +# -- Keys in an every event sent by any app that should be recorded in InfluxDB +# as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from +# `globalAppConfig` +globalInfluxTags: ["service"] + +cluster: + # The name of the Strimzi cluster. Synchronize this with the cluster name in + # the parent Sasquatch chart. + name: sasquatch + +# These values refer to the telegraf deployment and config + +image: + # -- Telegraf image repository + repo: "docker.io/library/telegraf" + + # -- Telegraf image tag + tag: "1.30.2-alpine" + + # -- Image pull policy + pullPolicy: "Always" + +# -- Annotations for telegraf-kafka-consumers pods +podAnnotations: {} + +# -- Labels for telegraf-kafka-consumer pods +podLabels: {} + +# -- Secret names to use for Docker pulls +imagePullSecrets: [] + +# -- Arguments passed to the Telegraf agent containers +args: [] + +# -- Telegraf agent enviroment variables +# @default -- See `values.yaml` +env: + - name: TELEGRAF_PASSWORD + valueFrom: + secretKeyRef: + name: sasquatch + # Telegraf KafkaUser password. + key: telegraf-password + - name: INFLUXDB_USER + valueFrom: + secretKeyRef: + name: sasquatch + # InfluxDB v1 user + key: influxdb-user + - name: INFLUXDB_PASSWORD + valueFrom: + secretKeyRef: + name: sasquatch + # InfluxDB v1 password + key: influxdb-password + +# -- Name of the secret with values to be added to the environment. +envFromSecret: "" + +# -- Run Telegraf in debug mode. 
+# @default -- false +debug: false + +influxdb: + # -- URL of the InfluxDB v1 instance to write to + url: "http://sasquatch-influxdb.sasquatch:8086" + +# -- Number of Telegraf replicas. Multiple replicas increase availability. +replicaCount: 3 + + +# -- Kubernetes resources requests and limits +# @default -- See `values.yaml` +resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "0.5" + memory: "1Gi" + +# -- Node labels for pod assignment +nodeSelector: {} + +# -- Affinity for pod assignment +affinity: {} + +# -- Tolerations for pod assignment +tolerations: [] diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index fd425d5279..556761d75d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -65,7 +65,6 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | registry.resources | object | See `values.yaml` | Kubernetes requests and limits for the Schema Registry | | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| users.appmetrics.enabled | bool | `false` | Enable user appmetrics | | users.camera.enabled | bool | `false` | Enable user camera, used at the camera environments | | users.consdb.enabled | bool | `false` | Enable user consdb | | users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml index 75b9433255..5b30f2a6a3 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml @@ -300,41 +300,3 @@ spec: host: "*" operation: All {{- end }} -{{- if .Values.users.appmetrics.enabled }} ---- -apiVersion: kafka.strimzi.io/v1beta2 -kind: KafkaUser -metadata: - name: appmetrics - labels: - strimzi.io/cluster: {{ .Values.cluster.name }} -spec: - authentication: - type: scram-sha-512 - password: - valueFrom: - secretKeyRef: - name: sasquatch - key: appmetrics-password - authorization: - type: simple - acls: - - resource: - type: group - name: "*" - patternType: literal - operation: All - - resource: - type: topic - name: "lsst.square.metrics" - patternType: prefix - type: allow - host: "*" - operation: All - - resource: - type: cluster - operations: - - Describe - - DescribeConfigs - # TODO: Any quotas needed? -{{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index fa0deaa57b..6d587fd746 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -285,10 +285,6 @@ users: # -- Enable user consdb enabled: false - appmetrics: - # -- Enable user appmetrics - enabled: false - mirrormaker2: # -- Enable replication in the target (passive) cluster diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 7f84437a65..13cf51ef04 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -69,10 +69,6 @@ ts-salkafka-password: description: >- ts-salkafka KafkaUser password. 
if: strimzi-kafka.users.tsSalKafka.enabled -appmetrics-password: - description: >- - appmetrics KafkaUser password. - if: strimzi-kafka.users.appmetrics.enabled connect-push-secret: description: >- Write token for pushing generated Strimzi Kafka Connect image to GitHub Container Registry. diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 4db585f4d5..6519b85afb 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -32,8 +32,6 @@ strimzi-kafka: enabled: true kafkaConnectManager: enabled: true - appmetrics: - enabled: true kraft: enabled: true kafkaController: @@ -75,12 +73,6 @@ telegraf-kafka-consumer: replicaCount: 1 topicRegexps: | [ "lsst.Test.*" ] - appmetrics: - enabled: true - database: "metrics" - replicaCount: 1 - topicRegexps: | - [ "lsst.square.metrics.*" ] kafdrop: cmdArgs: "--message.format=AVRO --topic.deleteEnabled=true --topic.createEnabled=true" diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index d7cb91e266..cc9fff35e6 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -283,3 +283,10 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +app-metrics: + # -- Enable the app-metrics subchart with topic, user, and telegraf configurations + enabled: false + + # -- The apps to create configuration for. + apps: [] From 86248413594b468e12e33e388d5502063acd0044 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 2 Oct 2024 09:51:04 -0700 Subject: [PATCH 192/193] Adjust telegraf resources on TTS --- applications/sasquatch/values-tucson-teststand.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 64f30615a3..da095af8b2 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -176,6 +176,13 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.obsenv" ] debug: true + resources: + limits: + cpu: "2" + memory: "2Gi" + requests: + cpu: "1" + memory: "1Gi" kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" From f21231b8577ef38bc99b26b959d49961aa3cf83f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 2 Oct 2024 16:35:59 -0700 Subject: [PATCH 193/193] Adjust configuration for EAS at TTS --- applications/sasquatch/values-tucson-teststand.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index da095af8b2..03b2703aa2 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -99,6 +99,8 @@ telegraf-kafka-consumer: eas: enabled: true database: "efd" + metric_batch_size: 100 + flush_interval: 20s topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] debug: true