diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml deleted file mode 100644 index 49b52fbb6d..0000000000 --- a/.github/workflows/dependencies.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: Dependency Update - -"on": - schedule: - - cron: "0 12 * * 1" - workflow_dispatch: {} - -jobs: - update: - runs-on: ubuntu-latest - timeout-minutes: 10 - - steps: - - uses: actions/checkout@v4 - - # Omit pre-commit updates for now until neophile looks only at releases - # so that it doesn't pick up an old helm-docs release. - - name: Run neophile - uses: lsst-sqre/run-neophile@v1 - with: - python-version: "3.12" - mode: pr - types: python - app-id: ${{ secrets.NEOPHILE_APP_ID }} - app-secret: ${{ secrets.NEOPHILE_PRIVATE_KEY }} - - - name: Report status - if: always() - uses: ravsamhq/notify-slack-action@v2 - with: - status: ${{ job.status }} - notify_when: "failure" - notification_title: "Periodic dependency update for {repo} failed" - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 772024b059..c684835a13 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - -c=.yamllint.yml - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.29.1 + rev: 0.29.2 hooks: - id: check-jsonschema files: ^applications/.*/secrets(-[^./-]+)?\.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.7 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/Makefile b/Makefile index 7916bf066c..48c10041bb 100644 --- a/Makefile +++ b/Makefile @@ -41,20 +41,20 @@ update-deps: pip install --upgrade pip uv uv pip install --upgrade pre-commit pre-commit autoupdate - uv pip compile --upgrade --generate-hashes \ - --output-file requirements/main.txt requirements/main.in - uv pip compile --upgrade --generate-hashes \ + uv pip compile --upgrade --universal --generate-hashes \ + --output-file requirements/main.txt pyproject.toml + uv pip compile --upgrade --universal --generate-hashes \ --output-file requirements/dev.txt requirements/dev.in - uv pip compile --upgrade --generate-hashes \ + uv pip compile --upgrade --universal --generate-hashes \ --output-file requirements/tox.txt requirements/tox.in # Useful for testing against a Git version of Safir. .PHONY: update-deps-no-hashes update-deps-no-hashes: pip install --upgrade uv - uv pip compile --upgrade \ - --output-file requirements/main.txt requirements/main.in - uv pip compile --upgrade \ + uv pip compile --upgrade --universal \ + --output-file requirements/main.txt pyproject.toml + uv pip compile --upgrade --universal \ --output-file requirements/dev.txt requirements/dev.in - uv pip compile --upgrade \ + uv pip compile --upgrade --universal \ --output-file requirements/tox.txt requirements/tox.in diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index 7cf49c19d9..c8043c152d 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -72,12 +72,17 @@ Alert transmission to community brokers | alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. 
| | alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | +| alert-stream-broker.kafkaController.enabled | bool | `false` | Enable Kafka Controller | +| alert-stream-broker.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | +| alert-stream-broker.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | +| alert-stream-broker.kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | alert-stream-broker.kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. | | alert-stream-broker.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | alert-stream-broker.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. | | alert-stream-broker.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | alert-stream-broker.kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | alert-stream-broker.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| alert-stream-broker.kraft | bool | `true` | | | alert-stream-broker.maxBytesRetained | string | `"100000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | | alert-stream-broker.maxMillisecondsRetained | string | `"5259492000"` | Maximum amount of time to save alerts in the replay topic, in milliseconds. Default is 7 days (604800000). | | alert-stream-broker.nameOverride | string | `""` | | @@ -95,10 +100,8 @@ Alert transmission to community brokers | alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault | -| alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | -| alert-stream-broker.zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| alert-stream-broker.zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | | alert-stream-schema-registry.clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. | +| alert-stream-schema-registry.compatibilityLevel | string | `"None"` | | | alert-stream-schema-registry.hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. | | alert-stream-schema-registry.name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. | | alert-stream-schema-registry.port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. 
| diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 4c6a0bcc4f..c44bd492cf 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -29,12 +29,17 @@ Kafka broker cluster for distributing alerts | kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | +| kafkaController.enabled | bool | `false` | Enable Kafka Controller | +| kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | +| kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | +| kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. | | kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. | | kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| kraft | bool | `true` | | | maxBytesRetained | string | `"100000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | | maxMillisecondsRetained | string | `"5259492000"` | Maximum amount of time to save alerts in the replay topic, in milliseconds. Default is 7 days (604800000). | | nameOverride | string | `""` | | @@ -52,6 +57,3 @@ Kafka broker cluster for distributing alerts | users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | -| zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | -| zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml index 98717b9a4a..26c74abe19 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml @@ -26,6 +26,7 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. 
# To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer @@ -45,6 +46,7 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. # To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer @@ -64,11 +66,31 @@ spec: cleanup.policy: "delete" retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days retention.bytes: {{ .Values.maxBytesRetained }} - compression.type: {{ .Values.devTopicCompression}} + compression.type: {{ .Values.topicCompression}} # The default timestamp is the creation time of the alert. # To get the ingestion rate, we need this to be the log # append time, and the header will contain the producer # timestamp instead message.timestamp.type: 'LogAppendTime' partitions: {{ .Values.devTopicPartitions }} - replicas: {{ .Values.devTopicReplicas }} \ No newline at end of file + replicas: {{ .Values.devTopicReplicas }} +--- +apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}" +kind: KafkaTopic +metadata: + labels: + strimzi.io/cluster: "{{ .Values.clusterName }}" + name: "{{ .Values.latissTopicName}}" +spec: + config: + cleanup.policy: "delete" + retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days + retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression}} + # The default timestamp is the creation time of the alert. + # To get the ingestion rate, we need this to be the log + # append time, and the header will contain the producer + # timestamp instead + message.timestamp.type: 'LogAppendTime' + partitions: {{ .Values.latissTopicPartitions }} + replicas: {{ .Values.latissTopicReplicas }} \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml index a226042239..2ca5b98df1 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka.yaml @@ -1,7 +1,60 @@ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: controller + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + replicas: {{ .Values.kafka.replicas }} + roles: + - controller + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: {{ .Values.kafkaController.storage.size }} + class: {{ .Values.kafkaController.storage.storageClassName }} + deleteClaim: false + {{- with .Values.kafkaController.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaNodePool +metadata: + name: kafka + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} + annotations: + strimzi.io/next-node-ids: "[0-99]" +spec: + replicas: {{ .Values.kafka.replicas }} + roles: + - broker + storage: + type: jbod + volumes: + - id: 0 + type: persistent-claim + size: {{ .Values.kafka.storage.size }} + {{- if .Values.kafka.storage.storageClassName }} + class: {{ .Values.kafka.storage.storageClassName }} + {{- end}} + deleteClaim: false + {{- with .Values.kafka.resources }} + resources: + {{- toYaml . 
| nindent 6 }} + {{- end }} +--- apiVersion: kafka.strimzi.io/{{ .Values.strimziAPIVersion }} kind: Kafka metadata: name: {{ .Values.cluster.name }} + annotations: + strimzi.io/kraft: enabled + strimzi.io/node-pools: enabled spec: {{- if .Values.kafkaExporter.enabled }} kafkaExporter: @@ -85,14 +138,14 @@ spec: {{- if .Values.kafka.externalListener.brokers }} brokers: - {{- range $idx, $broker := .Values.kafka.externalListener.brokers }} - - broker: {{ $idx }} + {{- range $broker := .Values.kafka.externalListener.brokers }} + - broker: {{ $broker.broker }} loadBalancerIP: {{ $broker.ip }} advertisedHost: {{ $broker.host }} - annotations: {{ toYaml $broker.annotations | nindent 16 }} - {{- end }} + advertisedPort: 9094 + annotations: {{ toYaml $broker.annotations | nindent 16 }} + {{- end }} {{- end }} - {{- if and (.Values.kafka.externalListener.tls.enabled) (.Values.kafka.externalListener.bootstrap.host) }} brokerCertChainAndKey: secretName: {{ .Values.cluster.name }}-external-tls @@ -114,8 +167,6 @@ spec: transaction.state.log.replication.factor: 3 transaction.state.log.min.isr: 2 message.max.bytes: 4194304 # 8 Megabytes. For testing purposes only. - log.message.format.version: {{ .Values.kafka.logMessageFormatVersion }} - inter.broker.protocol.version: {{ .Values.kafka.interBrokerProtocolVersion }} ssl.client.auth: required {{- range $key, $value := .Values.kafka.config }} {{ $key }}: {{ $value }} @@ -133,53 +184,6 @@ spec: class: {{ .Values.kafka.storage.storageClassName }} deleteClaim: false - template: - pod: - {{- if .Values.kafka.nodePool.tolerations }} - tolerations: - {{- range $tol := .Values.kafka.nodePool.tolerations }} - - key: {{ $tol.key }} - operator: "Equal" - value: {{ $tol.value }} - effect: {{ $tol.effect }} - {{- end }} - {{- end }} - - {{- if .Values.kafka.nodePool.affinities }} - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - {{- range $affinity := .Values.kafka.nodePool.affinities }} - - weight: 1 - preference: - matchExpressions: - - key: {{ $affinity.key }} - operator: In - values: [{{ $affinity.value }}] - {{- end }} - {{- end }} - - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/name" - operator: In - values: - - kafka - topologyKey: "kubernetes.io/hostname" - - zookeeper: - replicas: {{ .Values.zookeeper.replicas }} - storage: - # Note that storage is configured per replica. If there are 3 replicas, - # each will get its own PersistentVolumeClaim for the configured size. - type: persistent-claim - size: {{ .Values.zookeeper.storage.size }} - class: {{ .Values.zookeeper.storage.storageClassName }} - deleteClaim: false - template: pod: {{- if .Values.kafka.nodePool.tolerations }} diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml index 1757660413..8c5f950fa4 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml @@ -121,17 +121,6 @@ users: # matches. groups: ["rubin-testing"] - -zookeeper: - # -- Number of Zookeeper replicas to run. - replicas: 3 - - storage: - # -- Size of the backing storage disk for each of the Zookeeper instances. - size: 1000Gi - # -- Name of a StorageClass to use when requesting persistent volumes. 
- storageClassName: standard - tls: subject: # -- Organization to use in the 'Subject' field of the broker's TLS certificate. @@ -149,6 +138,29 @@ fullnameOverride: "" nameOverride: "" +kraft: true + +kafkaController: + # -- Enable Kafka Controller + enabled: false + + storage: + # -- Size of the backing storage disk for each of the Kafka controllers + size: 20Gi + + # -- Name of a StorageClass to use when requesting persistent volumes + storageClassName: "" + + # -- Kubernetes requests and limits for the Kafka Controller + # @default -- See `values.yaml` + resources: + requests: + memory: 32Gi + cpu: "4" + limits: + memory: 64Gi + cpu: "8" + # -- Topic used to send test alerts. testTopicName: alert-stream-test diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md index a31ce78c20..5e7df966e2 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md @@ -7,6 +7,7 @@ Confluent Schema Registry for managing schema versions for the Alert Stream | Key | Type | Default | Description | |-----|------|---------|-------------| | clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. | +| compatibilityLevel | string | `"None"` | | | hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. | | name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. | | port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. | diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml b/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml index f97585bec4..ce73059575 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/templates/schema-registry-server.yaml @@ -9,4 +9,5 @@ metadata: revision: "1" spec: strimzi-version: {{ .Values.strimziAPIVersion }} - listener: internal \ No newline at end of file + listener: internal + compatibilityLevel: none \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml b/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml index e77f15f03c..ab28d9c736 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/values.yaml @@ -16,6 +16,8 @@ clusterName: alert-broker # -- Name of the topic used by the Schema Registry to store data. schemaTopic: registry-schemas +compatibilityLevel: None + # -- Hostname for an ingress which sends traffic to the Schema Registry. 
hostname: usdf-alert-schemas-dev.slac.stanford.edu diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index f779daf70c..0f56055671 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -2,58 +2,53 @@ alert-stream-broker: cluster: name: "alert-broker" - zookeeper: - storage: - size: 1000Gi - storageClassName: wekafs--sdf-k8s01 - kafka: version: 3.7.0 - # -- Encoding version for messages, see - # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. - logMessageFormatVersion: 3.4 - # -- Version of the protocol for inter-broker communication, see - # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. - interBrokerProtocolVersion: 3.4 replicas: 6 prometheusScrapingEnabled: true - # Addresses based on the state as of 2021-12-02; these were assigned by - # Google and now we're pinning them. + # Addresses based on the state as of 2023; these were assigned by + # Square and now we're pinning them. externalListener: tls: enabled: false bootstrap: host: usdf-alert-stream-dev.lsst.cloud - ip: "134.79.23.215" + ip: "" annotations: metallb.universe.tf/address-pool: 'sdf-dmz' brokers: - host: usdf-alert-stream-dev-broker-0.lsst.cloud ip: "134.79.23.214" + broker: 6 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-1.lsst.cloud ip: "134.79.23.216" + broker: 7 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-2.lsst.cloud ip: "134.79.23.218" + broker: 8 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-3.lsst.cloud ip: "134.79.23.220" + broker: 9 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-4.lsst.cloud ip: "134.79.23.217" + broker: 10 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' - host: usdf-alert-stream-dev-broker-5.lsst.cloud ip: "134.79.23.219" + broker: 11 annotations: metallb.universe.tf/address-pool: 'sdf-dmz' @@ -111,15 +106,23 @@ alert-stream-broker: groups: ["pittgoogle-idfint"] testTopicName: alert-stream-test - simulatedTopicName: alerts-simulated topicPartitions: 400 topicReplicas: 1 + + simulatedTopicName: alerts-simulated simulatedTopicPartitions: 45 simulatedTopicReplicas: 1 + devTopicName: dev-topic devTopicPartitions: 10 devTopicReplicas: 1 - devTopicCompression: lz4 + + latissTopicName: latiss-alerts + latissTopicPartitions: 45 + latissTopicReplicas: 1 + + # Compression set to snappy to balance alert packet compression speed and size. 
+ topicCompression: snappy alert-stream-schema-registry: hostname: "usdf-alert-schemas-dev.slac.stanford.edu" diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index aa76df123b..28c6a47cac 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.42.0 + version: 0.42.3 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 2956e31a97..56c9e07f2f 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 7.4.5 + version: 7.6.1 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/values-usdfdev-alert-stream-broker.yaml b/applications/argocd/values-usdfdev-alert-stream-broker.yaml index 482984f9b7..8298470022 100644 --- a/applications/argocd/values-usdfdev-alert-stream-broker.yaml +++ b/applications/argocd/values-usdfdev-alert-stream-broker.yaml @@ -33,6 +33,7 @@ argo-cd: g, smart@slac.stanford.edu, role:admin g, ebellm@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin + g, afausti@slac.stanford.edu, role:admin scopes: "[email]" server: diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index c5343fe22b..3a05ea872d 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -58,6 +58,9 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer + g, gmegias@slac.stanford.edu, role:developer + g, salnikov@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/argocd/values-usdfint.yaml b/applications/argocd/values-usdfint.yaml index 5eaeafb2bd..12ba88dd96 100644 --- a/applications/argocd/values-usdfint.yaml +++ b/applications/argocd/values-usdfint.yaml @@ -57,6 +57,7 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 59a611f653..9c5fdf1734 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -53,6 +53,7 @@ argo-cd: g, smart@slac.stanford.edu, role:developer g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer + g, vbecker@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index 20d3063518..9d3b40a094 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Server for Butler data abstraction service sources: - https://github.com/lsst/daf_butler -appVersion: server-2.0.0 +appVersion: server-2.1.0 diff --git a/applications/butler/README.md b/applications/butler/README.md index ac80574a22..a3d2d49811 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -15,6 +15,8 @@ Server for Butler data abstraction service | autoscaling.maxReplicas | int | `100` | Maximum number of butler 
deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | +| config.additionalS3ProfileName | string | No second S3 profile is available. | Profile name identifying a second S3 endpoint and set of credentials to use for accessing files in the datastore. | +| config.dp02ClientServerIsDefault | bool | `false` | True if the 'dp02' Butler repository alias should use client/server Butler. False if it should use DirectButler. | | config.dp02PostgresUri | string | No configuration file for DP02 will be generated. | Postgres connection string pointing to the registry database hosting Data Preview 0.2 data. | | config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | | config.pguser | string | Use values specified in per-repository Butler config files. | Postgres username used to connect to the Butler DB | diff --git a/applications/butler/secrets.yaml b/applications/butler/secrets.yaml index 1b2d88511e..23ee59d217 100644 --- a/applications/butler/secrets.yaml +++ b/applications/butler/secrets.yaml @@ -18,3 +18,9 @@ copy: application: nublado key: "postgres-credentials.txt" +"additional-s3-profile": + description: >- + Credentials and endpoint for a second S3 profile to use, in addition to the + default endpoint. For docs on format see + https://github.com/lsst/resources/blob/a34598e125919799d3db4bd8a2363087c3de434e/python/lsst/resources/s3utils.py#L201 + if: config.additionalS3ProfileName diff --git a/applications/butler/templates/configmap.yaml b/applications/butler/templates/configmap.yaml index 3a815fc6e6..8529fa2ba0 100644 --- a/applications/butler/templates/configmap.yaml +++ b/applications/butler/templates/configmap.yaml @@ -46,11 +46,17 @@ data: # connecting to the Butler server. # # We provide both DirectButler and RemoteButler versions of dp02 because some - # users rely on functionality not yet available via RemoteButler. The default is currently - # DirectButler because the Community Science team has not had the opportunity to test RemoteButler, - # and RemoteButler is not available in the current "recommended" RSP image. + # users rely on functionality not yet available via RemoteButler. The default in production is + # DirectButler because RemoteButler is not available in the current recommended RSP image. + # On dev and int it is RemoteButler -- the Community Science team is testing the new system. 
idf-repositories.yaml: | - dp02: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/configs/dp02.yaml - dp02-direct: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/configs/dp02.yaml - dp02-remote: {{ .Values.global.baseUrl }}{{ .Values.config.pathPrefix }}/repo/dp02/butler.yaml + {{- $dp02Direct := print .Values.global.baseUrl .Values.config.pathPrefix "/configs/dp02.yaml" -}} + {{- $dp02Remote := print .Values.global.baseUrl .Values.config.pathPrefix "/repo/dp02/butler.yaml" -}} + {{- if .Values.config.dp02ClientServerIsDefault }} + dp02: {{ $dp02Remote }} + {{- else }} + dp02: {{ $dp02Direct }} + {{- end }} + dp02-direct: {{ $dp02Direct }} + dp02-remote: {{ $dp02Remote }} {{- end }} diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index 9ba64a4257..c7e3f06b4c 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -65,6 +65,13 @@ spec: - name: PGUSER value: {{ .Values.config.pguser | quote }} {{ end }} + {{ if .Values.config.additionalS3ProfileName }} + - name: LSST_RESOURCES_S3_PROFILE_{{ .Values.config.additionalS3ProfileName }} + valueFrom: + secretKeyRef: + name: {{ include "butler.fullname" . }} + key: additional-s3-profile + {{ end }} volumeMounts: - name: "butler-secrets" mountPath: "/opt/lsst/butler/secrets" diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index 08c73c983f..92cc0e6897 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -2,7 +2,10 @@ image: pullPolicy: Always config: + dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" + additionalS3ProfileName: "ir2" repositories: dp02: "file:///opt/lsst/butler/config/dp02.yaml" + ir2: "s3://butler-us-central1-panda-dev/ir2/butler-ir2.yaml" diff --git a/applications/butler/values-idfint.yaml b/applications/butler/values-idfint.yaml index 5f16d776da..fc3fcb6a8f 100644 --- a/applications/butler/values-idfint.yaml +++ b/applications/butler/values-idfint.yaml @@ -1,4 +1,5 @@ config: + dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" repositories: diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index c59779c6a5..51ec757201 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -41,10 +41,15 @@ podAnnotations: {} resources: limits: cpu: "1" - memory: "324Mi" + # Worst case peak usage for a single container would be something like all + # 40 threads in the thread pool running large queries costing ~35MB each. + memory: "1.5Gi" requests: cpu: "15m" - memory: "150Mi" + # Butler server uses around 200MB idle at startup, but under dynamic usage + # Python seems to want to hold onto another couple hundred megabytes of + # heap. + memory: "0.5Gi" # -- Node selection rules for the butler deployment pod nodeSelector: {} @@ -80,6 +85,10 @@ config: # @default -- No configuration file for DP02 will be generated. dp02PostgresUri: "" + # -- True if the 'dp02' Butler repository alias should use client/server + # Butler. False if it should use DirectButler. + dp02ClientServerIsDefault: false + # -- Postgres username used to connect to the Butler DB # @default -- Use values specified in per-repository Butler config files. 
pguser: "" @@ -87,6 +96,11 @@ config: # -- URL for the S3 service where files for datasets are stored by Butler. s3EndpointUrl: "" + # -- Profile name identifying a second S3 endpoint and set of credentials + # to use for accessing files in the datastore. + # @default -- No second S3 profile is available. + additionalS3ProfileName: "" + # -- The prefix of the path portion of the URL where the Butler service will # be exposed. For example, if the service should be exposed at # `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` diff --git a/applications/cm-service/Chart.yaml b/applications/cm-service/Chart.yaml index ad1b2ef5a1..f6174a96f1 100644 --- a/applications/cm-service/Chart.yaml +++ b/applications/cm-service/Chart.yaml @@ -1,13 +1,8 @@ apiVersion: v2 -appVersion: 0.1.0 +appVersion: 0.1.2 description: Campaign Management for Rubin Data Release Production name: cm-service sources: - https://github.com/lsst-dm/cm-service type: application version: 1.0.0 - -dependencies: -- name: redis - version: 1.0.13 - repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/cm-service/README.md b/applications/cm-service/README.md index 88b43969e7..9f720ea79d 100644 --- a/applications/cm-service/README.md +++ b/applications/cm-service/README.md @@ -30,8 +30,6 @@ Campaign Management for Rubin Data Release Production | image.repository | string | `"ghcr.io/lsst-dm/cm-service"` | Image to use for frontend containers | | image.tag | string | The appVersion of the chart | Tag of frontend image to use | | ingress.annotations | object | `{}` | Additional annotations for the frontend ingress rule | -| redis.config.secretKey | string | `"password"` | Key inside secret from which to get the Redis password (do not change) | -| redis.config.secretName | string | `"redis-secret"` | Name of secret containing Redis password | | worker.affinity | object | `{}` | Affinity rules for the worker pods | | worker.htcondor.config.contents | string | `nil` | If specified, contents of htcondor config file to be injected into worker containers | | worker.htcondor.config.mountPath | string | `nil` | If specified, location for htcondor config file to be injected into worker containers | diff --git a/applications/cm-service/secrets.yaml b/applications/cm-service/secrets.yaml index 414e59c34f..681ae4c8a3 100644 --- a/applications/cm-service/secrets.yaml +++ b/applications/cm-service/secrets.yaml @@ -1,16 +1,8 @@ -redis-password: - description: >- - Password used to authenticate cm-service to its internal Redis server, - deployed as part of the same Argo CD application. This secret can be - changed at any time, but both the Redis server and the cm-service - deployments will then have to be restarted to pick up the new value. - generate: - type: password postgres-password: description: >- Password used to authenticate cm-service to its internal cnpg Postgres - server, deployed as part of the same Argo CD application. This secret can - be changed at any time, but both the Redis server and the cm-service - deployments will then have to be restarted to pick up the new value. + server, deployed as part of the same Argo CD application. This secret can be + changed at any time, but the cm-service deployments will then have to be + restarted to pick up the new value. 
generate: type: password diff --git a/applications/cm-service/templates/deployment.yaml b/applications/cm-service/templates/deployment.yaml index b50e8277eb..bfe3b38d8a 100644 --- a/applications/cm-service/templates/deployment.yaml +++ b/applications/cm-service/templates/deployment.yaml @@ -28,11 +28,6 @@ spec: containers: - name: "cm-service" env: - - name: CM_ARQ_REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secret - key: password - name: CM_DATABASE_PASSWORD valueFrom: secretKeyRef: @@ -48,8 +43,6 @@ spec: value: {{ .Values.config.logProfile | quote }} - name: CM_LOG_LEVEL value: {{ .Values.config.logLevel | quote }} - - name: CM_ARQ_REDIS_URL - value: "redis://cm-service-redis/1" image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: diff --git a/applications/cm-service/templates/ingress.yaml b/applications/cm-service/templates/ingress.yaml new file mode 100644 index 0000000000..882de320dc --- /dev/null +++ b/applications/cm-service/templates/ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "cm-service" + labels: + {{- include "cm-service.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + loginRedirect: true + scopes: + all: + - "exec:internal-tools" +template: + metadata: + name: "cm-service" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.config.pathPrefix | quote }} + pathType: "Prefix" + backend: + service: + name: "cm-service" + port: + number: 8080 + - path: "/web_app" + pathType: "Prefix" + backend: + service: + name: "cm-service" + port: + number: 8080 diff --git a/applications/cm-service/templates/vault-secrets.yaml b/applications/cm-service/templates/vault-secrets.yaml index 26f72b46e6..996a6617d8 100644 --- a/applications/cm-service/templates/vault-secrets.yaml +++ b/applications/cm-service/templates/vault-secrets.yaml @@ -1,18 +1,5 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret -metadata: - name: redis-secret - labels: - {{- include "cm-service.labels" . 
| nindent 4 }} -spec: - path: "{{ .Values.global.vaultSecretsPath }}/cm-service" - templates: - password: >- - {% index .Secrets "redis-password" %} - type: Opaque ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret metadata: name: postgres-secret labels: diff --git a/applications/cm-service/templates/worker-deployment.yaml b/applications/cm-service/templates/worker-deployment.yaml index e0bce6f8c1..3218cd651d 100644 --- a/applications/cm-service/templates/worker-deployment.yaml +++ b/applications/cm-service/templates/worker-deployment.yaml @@ -27,14 +27,6 @@ spec: automountServiceAccountToken: false containers: - name: "cm-service-worker" - env: - - name: CM_ARQ_REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secret - key: password - - name: CM_ARQ_REDIS_URL - value: "redis://cm-service-redis/1" image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} resources: diff --git a/applications/cm-service/values-usdf-cm-dev.yaml b/applications/cm-service/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..e7b42a3f33 --- /dev/null +++ b/applications/cm-service/values-usdf-cm-dev.yaml @@ -0,0 +1,31 @@ +config: + logLevel: "INFO" + logProfile: "development" + databaseEcho: true + outputVolume: + storageClassName: "sdf-data-rubin" + subPath: "shared/campaigns/users/usdf-cm-prod" +worker: + htcondor: + config: + mountPath: "/home/lsstsvc1/stack/conda/envs/lsst-scipipe-9.0.0/etc/condor/config.d" + contents: | + CONDOR_HOST = sdfiana012.sdf.slac.stanford.edu + COLLECTOR_HOST = sdfiana012.sdf.slac.stanford.edu + SEC_CLIENT_AUTHENTICATION_METHODS = FS, FS_REMOTE + use security:recommended_v9_0 + SEC_DEFAULT_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + SEC_DAEMON_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + SEC_READ_AUTHENTICATION_METHODS = FS_REMOTE, IDTOKENS, FS + FS_REMOTE_DIR = /sdf/group/rubin/services/htcondor/shared + SCHEDD_ADDRESS_FILE = /config/schedd-address + fsRemoteDir: + storageClassName: "sdf-group-rubin" + subPath: "services/htcondor/shared" + mountPath: "/sdf/group/rubin/services/htcondor/shared" + scheddAddress: + mountPath: "/config" + contents: | + <172.24.49.173:5935?addrs=172.24.49.173-5935&alias=sdfiana012.sdf.slac.stanford.edu> + $CondorVersion: 23.0.12 2024-06-13 BuildID: 739441 PackageID: 23.0.12-1 $ + $CondorPlatform: x86_64_AlmaLinux8 $ diff --git a/applications/cm-service/values.yaml b/applications/cm-service/values.yaml index 363b8a4e25..ae260853f4 100644 --- a/applications/cm-service/values.yaml +++ b/applications/cm-service/values.yaml @@ -129,15 +129,6 @@ worker: # -- If specified, location for htcondor schedd address file to be injected into worker pods contents: null -redis: - config: - # -- Name of secret containing Redis password - secretName: "redis-secret" - - # -- Key inside secret from which to get the Redis password (do not - # change) - secretKey: "password" - # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. global: diff --git a/applications/consdb/secrets.yaml b/applications/consdb/secrets.yaml index 61468589eb..99a8f6ba13 100644 --- a/applications/consdb/secrets.yaml +++ b/applications/consdb/secrets.yaml @@ -6,7 +6,12 @@ consdb-password: key: consdb-password oods-password: description: >- - PostgreSQL password for the OODS user Butler database. + PostgreSQL password for the OODS user Butler database. 
lfa-password: description: >- LFA password +exposurelog-password: + description: "Password for the TTS where we use exposurelog database." + copy: + application: exposure-log + key: exposurelog_password diff --git a/applications/consdb/values-tucson-teststand.yaml b/applications/consdb/values-tucson-teststand.yaml new file mode 100644 index 0000000000..21997de89d --- /dev/null +++ b/applications/consdb/values-tucson-teststand.yaml @@ -0,0 +1,21 @@ +db: + user: "oods" + host: "postgresdb01.tu.lsst.org" + database: "exposurelog" +lfa: + s3EndpointUrl: "https://s3.tu.lsst.org" +hinfo: + latiss: + enable: true + tag: "tickets-DM-44551" + logConfig: "consdb.hinfo=DEBUG" + lsstcomcam: + enable: true + tag: "tickets-DM-44551" + logConfig: "consdb.hinfo=DEBUG" + lsstcam: + enable: false + tag: "tickets-DM-44551" +pq: + image: + tag: "main" diff --git a/applications/exposurelog/Chart.yaml b/applications/exposurelog/Chart.yaml index c1a84f7c27..5b095cfd61 100644 --- a/applications/exposurelog/Chart.yaml +++ b/applications/exposurelog/Chart.yaml @@ -12,4 +12,4 @@ version: 1.0.0 # number should be incremented each time you make changes to the # application. Versions are not expected to follow Semantic Versioning. They # should reflect the version the application is using. -appVersion: 1.2.1 +appVersion: 1.3.0 diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index be173d896a..927c35f2f7 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -32,6 +32,7 @@ Log messages related to an exposure | db.host | string | `"postgres.postgres"` | database host | | db.port | int | `5432` | database port | | db.user | string | `"exposurelog"` | database user | +| env | list | `[]` | Environment variables to set in the exposurelog pod | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -46,5 +47,6 @@ Log messages related to an exposure | podSecurityContext | object | `{}` | Security context for the exposurelog pod | | replicaCount | int | `1` | How many exposurelog pods to run | | resources | object | `{}` | Resource limits and requests for the exposurelog pod | +| secretEnv | list | `[]` | Additional secret environment variables to set in the exposurelog pod | | securityContext | object | `{}` | Security context for the exposurelog deployment | | tolerations | list | `[]` | Tolerations for the exposurelog pod | diff --git a/applications/exposurelog/secrets-usdfdev.yaml b/applications/exposurelog/secrets-usdfdev.yaml new file mode 100644 index 0000000000..317e9c5aab --- /dev/null +++ b/applications/exposurelog/secrets-usdfdev.yaml @@ -0,0 +1,12 @@ +"aws-credentials.ini": + description: >- + S3 Butler credentials in AWS format. + copy: + application: nublado + key: "aws-credentials.ini" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/exposurelog/templates/deployment.yaml b/applications/exposurelog/templates/deployment.yaml index 775d9a7635..f738aaa49c 100644 --- a/applications/exposurelog/templates/deployment.yaml +++ b/applications/exposurelog/templates/deployment.yaml @@ -57,18 +57,11 @@ spec: value: {{ .Values.config.butler_uri_2 | quote }} - name: EXPOSURELOG_DB_USER value: {{ .Values.db.user | quote }} - - name: PGUSER - value: {{ .Values.db.user | quote }} - name: EXPOSURELOG_DB_PASSWORD valueFrom: secretKeyRef: name: exposurelog key: exposurelog_password - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: exposurelog - key: exposurelog_password - name: EXPOSURELOG_DB_HOST value: {{ .Values.db.host | quote }} - name: EXPOSURELOG_DB_PORT @@ -77,6 +70,17 @@ spec: value: {{ .Values.db.database | quote }} - name: SITE_ID value: {{ .Values.config.site_id | quote }} + {{- range .Values.env }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- range .Values.secretEnv }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .secretKey }} + {{- end }} volumeMounts: {{- if .Values.config.nfs_path_1 }} - name: volume1 @@ -86,8 +90,25 @@ spec: - name: volume2 mountPath: /volume_2 {{- end }} + - name: user-secrets + mountPath: /var/secrets/butler - name: tmp mountPath: /tmp + initContainers: + - name: secret-perm-fixer + image: busybox + command: + - "/bin/sh" + - "-c" + - | + cp /secrets/* /etc/secrets && \ + chown 1000:1000 /etc/secrets/* && \ + chmod 0400 /etc/secrets/* + volumeMounts: + - name: butler-secrets + mountPath: /secrets + - name: user-secrets + mountPath: /etc/secrets volumes: {{- if .Values.config.nfs_path_1 }} - name: volume1 @@ -110,6 +131,12 @@ spec: readOnly: true server: {{ .Values.config.nfs_server_3 }} {{- end }} + - name: butler-secrets + secret: + defaultMode: 420 + secretName: exposurelog + - name: user-secrets + emptyDir: {} - name: tmp emptyDir: {} {{- with .Values.nodeSelector }} diff --git a/applications/exposurelog/values-base.yaml b/applications/exposurelog/values-base.yaml index c3ff786c6e..3aff3ea83a 100644 --- a/applications/exposurelog/values-base.yaml +++ b/applications/exposurelog/values-base.yaml @@ -6,3 +6,15 @@ config: db: host: postgresdb01.ls.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is because telescope deployments do not +# use Butler access, which requires a different user and password. +env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password diff --git a/applications/exposurelog/values-summit.yaml b/applications/exposurelog/values-summit.yaml index 636150ebec..61ba1cf848 100644 --- a/applications/exposurelog/values-summit.yaml +++ b/applications/exposurelog/values-summit.yaml @@ -1,7 +1,7 @@ config: site_id: summit - nfs_path_1: /repo/LSSTComCam # Mounted as /volume_1 - nfs_server_1: comcam-archiver.cp.lsst.org + nfs_path_1: /comcam/repo/LSSTComCam # Mounted as /volume_1 + nfs_server_1: nfs3.cp.lsst.org butler_uri_1: /volume_1 nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2 @@ -9,3 +9,15 @@ config: butler_uri_2: /volume_2 db: host: postgresdb01.cp.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is because telescope deployments do not +# use Butler access, which requires a different user and password. 
+env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password diff --git a/applications/exposurelog/values-tucson-teststand.yaml b/applications/exposurelog/values-tucson-teststand.yaml index 94a3159b2f..9a9f75c408 100644 --- a/applications/exposurelog/values-tucson-teststand.yaml +++ b/applications/exposurelog/values-tucson-teststand.yaml @@ -9,3 +9,15 @@ config: butler_uri_2: /volume_2 db: host: postgresdb01.tu.lsst.org + +# We use the same database user and password defined on the db object +# in the values.yaml file. This is because telescope deployments do not +# use Butler access, which requires a different user and password. +env: + - name: PGUSER + value: exposurelog + +secretEnv: + - name: PGPASSWORD + secretName: exposurelog + secretKey: exposurelog_password diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml index 5153d2fde7..e914a0e17f 100644 --- a/applications/exposurelog/values-usdfdev.yaml +++ b/applications/exposurelog/values-usdfdev.yaml @@ -1,6 +1,17 @@ config: site_id: usdfdev - butler_uri_1: s3://rubin-summit-users/butler.yaml + butler_uri_1: s3://embargo@rubin-summit-users/butler.yaml db: host: usdf-summitdb.slac.stanford.edu user: usdf +env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/var/secrets/butler/aws-credentials.ini" + - name: DAF_BUTLER_REPOSITORY_INDEX + value: "/project/data-repos.yaml" + - name: S3_ENDPOINT_URL + value: "https://s3dfrgw.slac.stanford.edu" + - name: PGPASSFILE + value: "/var/secrets/butler/postgres-credentials.txt" + - name: PGUSER + value: "rubin" diff --git a/applications/exposurelog/values.yaml b/applications/exposurelog/values.yaml index 15be2fd7df..ece7625737 100644 --- a/applications/exposurelog/values.yaml +++ b/applications/exposurelog/values.yaml @@ -86,6 +86,12 @@ config: # Sandboxes should use `test`. site_id: "" +# -- Environment variables to set in the exposurelog pod +env: [] + +# -- Additional secret environment variables to set in the exposurelog pod +secretEnv: [] + # -- Annotations for the exposurelog pod podAnnotations: {} diff --git a/applications/gafaelfawr/templates/serviceaccount.yaml b/applications/gafaelfawr/templates/serviceaccount.yaml index aa35285b29..acf07b2ed2 100644 --- a/applications/gafaelfawr/templates/serviceaccount.yaml +++ b/applications/gafaelfawr/templates/serviceaccount.yaml @@ -15,7 +15,6 @@ metadata: name: "gafaelfawr-schema-update" labels: {{- include "gafaelfawr.labels" . | nindent 4 }} - annotations: annotations: helm.sh/hook: "pre-install,pre-upgrade" helm.sh/hook-delete-policy: "hook-succeeded" diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index ef48fe0314..f9148ef05d 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -28,6 +28,14 @@ config: firestore: project: "rsp-firestore-stable-e8eb" + # This environment provides authentication services to IDACs. + oidcServer: + enabled: true + dataRightsMapping: + g_users: + - "dp0.2" + - "dp0.3" + # Support generating user metadata for CADC authentication code. 
cadcBaseUuid: "5f0eb655-0e72-4948-a6a5-a94c0be9019f" diff --git a/applications/gafaelfawr/values-roe.yaml b/applications/gafaelfawr/values-roe.yaml index f53b9e0ead..f3914a1d96 100644 --- a/applications/gafaelfawr/values-roe.yaml +++ b/applications/gafaelfawr/values-roe.yaml @@ -8,6 +8,9 @@ config: github: clientId: "10172b4db1b67ee31620" + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "4cb5f948-aad9-466c-837b-5eae565b0a77" + # Allow access by GitHub team. groupMapping: "exec:admin": diff --git a/applications/gafaelfawr/values-usdf-cm-dev.yaml b/applications/gafaelfawr/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..18f741fc04 --- /dev/null +++ b/applications/gafaelfawr/values-usdf-cm-dev.yaml @@ -0,0 +1,223 @@ +replicaCount: 2 + +# Use the CSI storage class so that we can use snapshots. +redis: + persistence: + storageClass: "wekafs--sdf-k8s01" + +config: + internalDatabase: true + + oidcServer: + enabled: true + + oidc: + clientId: vcluster--usdf-cm-dev + audience: "vcluster--usdf-cm-dev" + loginUrl: "https://dex.slac.stanford.edu/auth" + tokenUrl: "https://dex.slac.stanford.edu/token" + issuer: "https://dex.slac.stanford.edu" + scopes: + - "openid" + - "email" + - "groups" + - "profile" + usernameClaim: "name" + + ldap: + url: ldaps://ldap-unix.slac.stanford.edu:636 + groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu + groupObjectClass: posixGroup + groupMemberAttr: memberUid + groupSearchByDn: false + userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu + userSearchAttr: uid + addUserGroup: false + uidAttr: uidNumber + gidAttr: gidNumber + nameAttr: gecos + + groupMapping: + "admin:token": + - "rubinmgr" + - "unix-admin" + "exec:admin": + - "rubinmgr" + - "unix-admin" + "exec:internal-tools": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:notebook": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:portal": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - 
"rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "read:tap": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "read:image": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "write:sasquatch": + - "rubinmgr" + - "unix-admin" + + initialAdmins: + - "afausti" + - "athor" + - "frossie" + - "jonathansick" + - "rra" + - "ytl" + - "ppascual" diff --git a/applications/ghostwriter/.helmignore b/applications/ghostwriter/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/ghostwriter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/ghostwriter/Chart.yaml b/applications/ghostwriter/Chart.yaml new file mode 100644 index 0000000000..8d923876b0 --- /dev/null +++ b/applications/ghostwriter/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.1 +description: URL rewriter/personalizer +name: ghostwriter +sources: +- https://github.com/lsst-sqre/ghostwriter +type: application +version: 1.0.0 diff --git a/applications/ghostwriter/README.md b/applications/ghostwriter/README.md new file mode 100644 index 0000000000..ef7ad713f4 --- /dev/null +++ b/applications/ghostwriter/README.md @@ -0,0 +1,29 @@ +# ghostwriter + +URL rewriter/personalizer + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the ghostwriter deployment pod | +| config | object | `{"debug":false,"slackAlerts":false}` | ghostwriter configuration | +| config.debug | bool | `false` | If set to true, enable verbose logging and disable structured JSON logging | +| config.slackAlerts | bool | `false` | Whether to send alerts and status to Slack. 
| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the ghostwriter image | +| image.repository | string | `"ghcr.io/lsst-sqre/ghostwriter"` | Image to use in the ghostwriter deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| mapping | object | `{"routes":[]}` | ghostwriter URL mapping | +| nodeSelector | object | `{}` | Node selection rules for the ghostwriter deployment pod | +| podAnnotations | object | `{}` | Annotations for the ghostwriter deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | See `values.yaml` | Resource limits and requests for the ghostwriter deployment pod | +| tolerations | list | `[]` | Tolerations for the ghostwriter deployment pod | diff --git a/applications/ghostwriter/secrets.yaml b/applications/ghostwriter/secrets.yaml new file mode 100644 index 0000000000..e0f4154904 --- /dev/null +++ b/applications/ghostwriter/secrets.yaml @@ -0,0 +1,8 @@ +slack-webhook: + description: >- + Slack web hook used to report internal errors to Slack. This secret may be + changed at any time. + if: config.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/applications/ghostwriter/templates/_helpers.tpl b/applications/ghostwriter/templates/_helpers.tpl new file mode 100644 index 0000000000..51a900690a --- /dev/null +++ b/applications/ghostwriter/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ghostwriter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ghostwriter.labels" -}} +helm.sh/chart: {{ include "ghostwriter.chart" . }} +{{ include "ghostwriter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ghostwriter.selectorLabels" -}} +app.kubernetes.io/name: "ghostwriter" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/ghostwriter/templates/configmap.yaml b/applications/ghostwriter/templates/configmap.yaml new file mode 100644 index 0000000000..ba63bffdea --- /dev/null +++ b/applications/ghostwriter/templates/configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "ghostwriter-config" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +data: + routing.yaml: |- + {{- toYaml .Values.mapping | nindent 4 }} + config.yaml: |- + # Empty: values will be taken from environment diff --git a/applications/ghostwriter/templates/deployment.yaml b/applications/ghostwriter/templates/deployment.yaml new file mode 100644 index 0000000000..8c945dc7b4 --- /dev/null +++ b/applications/ghostwriter/templates/deployment.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ghostwriter.selectorLabels" . 
| nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ghostwriter.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + env: + - name: "GHOSTWRITER_ENVIRONMENT_URL" + value: {{ .Values.global.baseUrl | quote }} + {{- if .Values.config.slackAlerts }} + - name: "GHOSTWRITER_ALERT_HOOK" + valueFrom: + secretKeyRef: + name: "ghostwriter-secret" + key: "slack-webhook" + {{- end }} + {{- if .Values.config.debug }} + - name: GHOSTWRITER_LOG_LEVEL + value: "DEBUG" + - name: GHOSTWRITER_LOGGING_PROFILE + value: "development" + {{- end }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + volumeMounts: + - name: "config" + mountPath: "/etc/ghostwriter" + readOnly: true + volumes: + - name: "config" + configMap: + name: "ghostwriter-config" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 diff --git a/applications/ghostwriter/templates/ingress-toplevel.yaml b/applications/ghostwriter/templates/ingress-toplevel.yaml new file mode 100644 index 0000000000..fb8ebbf3b5 --- /dev/null +++ b/applications/ghostwriter/templates/ingress-toplevel.yaml @@ -0,0 +1,37 @@ +{{- $root := . -}} +{{- range $route := $root.Values.mapping.routes }} +{{- $source := $route.source_prefix | trimAll "/" }} +{{- $res_src := trimPrefix "/" $source | replace "/" "-" }} +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ghostwriter-{{ $res_src }}" +config: + baseUrl: {{ $root.Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + delegate: + notebook: {} +template: + metadata: + name: "ghostwriter-{{ $res_src }}" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: "/ghostwriter/rewrite/$1" + {{- with $root.Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" $root.Values.global.host | quote }} + http: + paths: + - path: "/({{ $source }}/.*)" + pathType: "ImplementationSpecific" + backend: + service: + name: "ghostwriter" + port: + number: 8080 +--- +{{- end }} diff --git a/applications/ghostwriter/templates/ingress.yaml b/applications/ghostwriter/templates/ingress.yaml new file mode 100644 index 0000000000..1570a890a7 --- /dev/null +++ b/applications/ghostwriter/templates/ingress.yaml @@ -0,0 +1,32 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . 
| nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + delegate: + notebook: {} +template: + metadata: + name: "ghostwriter" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/ghostwriter" + pathType: "Prefix" + backend: + service: + name: "ghostwriter" + port: + number: 8080 diff --git a/applications/ghostwriter/templates/networkpolicy.yaml b/applications/ghostwriter/templates/networkpolicy.yaml new file mode 100644 index 0000000000..b4a5ecb1e5 --- /dev/null +++ b/applications/ghostwriter/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "ghostwriter" +spec: + podSelector: + matchLabels: + {{- include "ghostwriter.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/ghostwriter/templates/service.yaml b/applications/ghostwriter/templates/service.yaml new file mode 100644 index 0000000000..ced6204a96 --- /dev/null +++ b/applications/ghostwriter/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "ghostwriter" + labels: + {{- include "ghostwriter.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "ghostwriter.selectorLabels" . | nindent 4 }} diff --git a/applications/ghostwriter/templates/vault-secrets.yaml b/applications/ghostwriter/templates/vault-secrets.yaml new file mode 100644 index 0000000000..785ca96b8a --- /dev/null +++ b/applications/ghostwriter/templates/vault-secrets.yaml @@ -0,0 +1,11 @@ +{{- if .Values.config.slackAlerts }} +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: ghostwriter-secret + labels: + {{- include "ghostwriter.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/ghostwriter" + type: "Opaque" +{{- end }} diff --git a/applications/ghostwriter/values-idfdev.yaml b/applications/ghostwriter/values-idfdev.yaml new file mode 100644 index 0000000000..a2ee3d93ca --- /dev/null +++ b/applications/ghostwriter/values-idfdev.yaml @@ -0,0 +1,20 @@ +config: + # slackAlerts: true + debug: true +mapping: + routes: + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook + - source_prefix: "/system-test/" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" diff --git a/applications/ghostwriter/values-idfint.yaml b/applications/ghostwriter/values-idfint.yaml new file mode 100644 index 0000000000..364d12c05b --- /dev/null +++ b/applications/ghostwriter/values-idfint.yaml @@ -0,0 +1,17 @@ +mapping: + routes: + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook + - source_prefix: "/system-test/" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" diff --git a/applications/ghostwriter/values-idfprod.yaml b/applications/ghostwriter/values-idfprod.yaml new file mode 100644 index 0000000000..364d12c05b --- /dev/null +++ b/applications/ghostwriter/values-idfprod.yaml @@ -0,0 +1,17 @@ +mapping: + routes: + - source_prefix: "/queries/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/queries/portal_${path}.ipynb" + hooks: + - "ensure_running_lab" + - "portal_query" + - source_prefix: "/notebooks/github.com/" + target: "${base_url}/nb/user/${user}/lab/tree/notebooks/on-demand/github.com/${path}.ipynb" + hooks: + - "ensure_running_lab" + - "github_notebook" + # Two convenience routes that themselves just use the github_notebook hook + - source_prefix: "/system-test/" + target: "${base_url}/notebooks/github.com/lsst-sqre/system-test/${path}" + - source_prefix: "/tutorials/" + target: "${base_url}/notebooks/github.com/rubin-dp0/tutorial-notebooks/${path}" diff --git a/applications/ghostwriter/values.yaml b/applications/ghostwriter/values.yaml new file mode 100644 index 0000000000..75247c6676 --- /dev/null +++ b/applications/ghostwriter/values.yaml @@ -0,0 +1,73 @@ +# Default values for ghostwriter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- ghostwriter configuration +config: + # -- Whether to send alerts and status to Slack. 
+ slackAlerts: false + + # -- If set to true, enable verbose logging and disable structured JSON + # logging + debug: false + +# -- ghostwriter URL mapping +mapping: + # routes for URL rewriting + # @default -- None; must be set for each environment + routes: [] + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the ghostwriter deployment + repository: "ghcr.io/lsst-sqre/ghostwriter" + + # -- Pull policy for the ghostwriter image + pullPolicy: "IfNotPresent" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: null + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the ghostwriter deployment pod +affinity: {} + +# -- Node selection rules for the ghostwriter deployment pod +nodeSelector: {} + +# -- Annotations for the ghostwriter deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the ghostwriter deployment pod +# @default -- See `values.yaml` +resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "10m" + memory: "128Mi" + +# -- Tolerations for the ghostwriter deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null diff --git a/applications/kubernetes-replicator/Chart.yaml b/applications/kubernetes-replicator/Chart.yaml index 27c1677bfb..335507f312 100644 --- a/applications/kubernetes-replicator/Chart.yaml +++ b/applications/kubernetes-replicator/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/mittwald/kubernetes-replicator dependencies: - name: kubernetes-replicator - version: 2.10.1 + version: 2.10.2 repository: https://helm.mittwald.de diff --git a/applications/love/README.md b/applications/love/README.md index ac03b1cff6..fae75a25ca 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -17,6 +17,7 @@ Deployment for the LSST Operators Visualization Environment | global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| love-manager.manager | object | 
`{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. 
Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | love-manager.manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | love-manager.manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | love-manager.manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -66,55 +67,59 @@ Deployment for the LSST Operators Visualization Environment | love-manager.manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | | love-manager.manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | | love-manager.manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | -| love-manager.manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | -| love-manager.manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| love-manager.manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| love-manager.manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| love-manager.manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| love-manager.manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| love-manager.manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| love-manager.manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| love-manager.manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| love-manager.manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| love-manager.manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| love-manager.manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| love-manager.manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| love-manager.manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | -| love-manager.manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| love-manager.manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | -| love-manager.manager.producers.env.DB_PORT | int | `5432` | The port for the database service | -| love-manager.manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | -| love-manager.manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | -| love-manager.manager.producers.env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | -| love-manager.manager.producers.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | -| love-manager.manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| love-manager.manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| love-manager.manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | -| love-manager.manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | -| love-manager.manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| love-manager.manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| love-manager.manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| love-manager.manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | -| love-manager.manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | -| love-manager.manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | -| love-manager.manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | -| love-manager.manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | -| love-manager.manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | -| love-manager.manager.producers.envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | -| love-manager.manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | -| love-manager.manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | -| love-manager.manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | -| love-manager.manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | -| love-manager.manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | -| love-manager.manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | -| love-manager.manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | -| love-manager.manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | -| love-manager.manager.producers.ports.container | int | `8000` | The port on the container for normal communications | -| love-manager.manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | -| love-manager.manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | -| love-manager.manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | -| love-manager.manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | -| love-manager.manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.manager.producers[0] | object | `{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}` | Example producer configuration. Each producer should follow the same structure as frontend with the added name field. 
| +| love-manager.manager.producers[0].affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| love-manager.manager.producers[0].autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.producers[0].autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.producers[0].autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.producers[0].autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.producers[0].autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.producers[0].autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.producers[0].autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.producers[0].env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.producers[0].env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.producers[0].env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.producers[0].env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers[0].env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers[0].env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| love-manager.manager.producers[0].env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.producers[0].env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| love-manager.manager.producers[0].env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.producers[0].env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| love-manager.manager.producers[0].env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| love-manager.manager.producers[0].env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | +| love-manager.manager.producers[0].env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | +| love-manager.manager.producers[0].env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.producers[0].env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.producers[0].env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.producers[0].env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.producers[0].env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.producers[0].env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.producers[0].env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.producers[0].env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.manager.producers[0].envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| love-manager.manager.producers[0].envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| love-manager.manager.producers[0].envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| love-manager.manager.producers[0].envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| love-manager.manager.producers[0].envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.producers[0].envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | +| love-manager.manager.producers[0].envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| love-manager.manager.producers[0].envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.producers[0].envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| love-manager.manager.producers[0].envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| love-manager.manager.producers[0].image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.producers[0].image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| love-manager.manager.producers[0].image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| love-manager.manager.producers[0].nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| love-manager.manager.producers[0].ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers[0].ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.manager.producers[0].readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| love-manager.manager.producers[0].replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| love-manager.manager.producers[0].resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| love-manager.manager.producers[0].tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.manager.producers_ports | object | `{"container":8000,"node":30000}` | Configuration for the producers ports. this is a single configuration for all the producers. | +| love-manager.manager.producers_ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers_ports.node | int | `30000` | The port on the node for normal communcations | | love-manager.namespace | string | `"love"` | The overall namespace for the application | | love-manager.redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | love-manager.redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | @@ -170,7 +175,7 @@ Deployment for the LSST Operators Visualization Environment | love-nginx.tolerations | list | `[]` | Toleration specifications for the NGINX pod | | love-producer.affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | | love-producer.annotations | object | `{}` | This allows for the specification of pod annotations. 
| -| love-producer.env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| love-producer.env | object | `{}` | This section holds a set of key, value pairs for environmental variables | | love-producer.envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | | love-producer.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | | love-producer.image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index 8db2596b51..47a93da5c5 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -6,6 +6,7 @@ Helm chart for the LOVE manager service. | Key | Type | Default | Description | |-----|------|---------|-------------| +| manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PAS
SWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -55,55 +56,59 @@ Helm chart for the LOVE manager service. 
| manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | | manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | | manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | -| manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | -| manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | -| manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | -| manager.producers.env.DB_PORT | int | `5432` | The port for the database service | -| manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | -| manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | -| manager.producers.env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | -| manager.producers.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | -| manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | -| manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | -| manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | -| manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | -| manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | -| manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | -| manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | -| manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | -| manager.producers.envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | -| manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | -| manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | -| manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | -| manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | -| manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | -| manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | -| manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | -| manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | -| manager.producers.ports.container | int | `8000` | The port on the container for normal communications | -| manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | -| manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | -| manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | -| manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | -| manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| manager.producers[0] | object | `{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager"},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}` | Example producer configuration. Each producer should follow the same structure as frontend with the added name field. 
| +| manager.producers[0].affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| manager.producers[0].autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| manager.producers[0].autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| manager.producers[0].autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| manager.producers[0].autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| manager.producers[0].autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| manager.producers[0].autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| manager.producers[0].autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| manager.producers[0].env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| manager.producers[0].env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| manager.producers[0].env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| manager.producers[0].env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.producers[0].env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.producers[0].env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| manager.producers[0].env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| manager.producers[0].env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| manager.producers[0].env.DB_PORT | int | `5432` | The port for the database service | +| manager.producers[0].env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| manager.producers[0].env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| manager.producers[0].env.JIRA_API_HOSTNAME | string | `"rubinobs.atlassian.net"` | Set the hostname for the Jira instance | +| manager.producers[0].env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | +| manager.producers[0].env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| manager.producers[0].env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| manager.producers[0].env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| manager.producers[0].env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| manager.producers[0].env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| manager.producers[0].env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| manager.producers[0].env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| manager.producers[0].env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| manager.producers[0].envSecrets.ADMIN_USER_PASS | string | 
`"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| manager.producers[0].envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| manager.producers[0].envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| manager.producers[0].envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| manager.producers[0].envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| manager.producers[0].envSecrets.JIRA_API_TOKEN | string | `"jira-api-token"` | The LOVE manager jira API token secret key name | +| manager.producers[0].envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| manager.producers[0].envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | +| manager.producers[0].envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| manager.producers[0].envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| manager.producers[0].image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| manager.producers[0].image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| manager.producers[0].image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| manager.producers[0].nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| manager.producers[0].ports.container | int | `8000` | The port on the container for normal communications | +| manager.producers[0].ports.node | int | `30000` | The port on the node for normal communcations | +| manager.producers[0].readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| manager.producers[0].replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| manager.producers[0].resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| manager.producers[0].tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| manager.producers_ports | object | `{"container":8000,"node":30000}` | Configuration for the producers ports. this is a single configuration for all the producers. 
| +| manager.producers_ports.container | int | `8000` | The port on the container for normal communications | +| manager.producers_ports.node | int | `30000` | The port on the node for normal communcations | | namespace | string | `"love"` | The overall namespace for the application | | redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl index 13e1c5bcec..f95f771b7b 100644 --- a/applications/love/charts/love-manager/templates/_helpers.tpl +++ b/applications/love/charts/love-manager/templates/_helpers.tpl @@ -33,8 +33,8 @@ Manager frontend fullname {{/* Manager producers fullname */}} -{{- define "love-manager-producers.fullname" -}} -{{ include "love-manager.fullname" . }}-producers +{{- define "love-manager-producer.fullname" -}} +{{ include "love-manager.fullname" . }}-producer {{- end }} {{/* @@ -63,9 +63,9 @@ helm.sh/chart: {{ include "love-manager.chart" . }} {{/* Manager Producers Common labels */}} -{{- define "love-manager-producers.labels" -}} +{{- define "love-manager-producer.labels" -}} helm.sh/chart: {{ include "love-manager.chart" . }} -{{ include "love-manager-producers.selectorLabels" . }} +{{ include "love-manager-producer.selectorLabels" . }} {{- end }} {{/* @@ -87,9 +87,9 @@ app.kubernetes.io/instance: {{ include "love-manager.name" . }}-frontend {{/* Manager Producers Selector labels */}} -{{- define "love-manager-producers.selectorLabels" -}} +{{- define "love-manager-producer.selectorLabels" -}} app.kubernetes.io/name: {{ include "love-manager.name" . }} -app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producers +app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producer {{- end }} {{/* diff --git a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml index 308f2eb69b..855fe7d4d9 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml @@ -1,55 +1,62 @@ +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "love-manager-producers.fullname" . }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: - {{- include "love-manager-producers.labels" . | nindent 4 }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} spec: selector: matchLabels: - {{- include "love-manager-producers.selectorLabels" . | nindent 6 }} - {{- if not .Values.manager.producers.autoscaling.enabled }} - replicas: {{ .Values.manager.producers.replicas }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} + {{- if not $manager_producer.autoscaling.enabled }} + replicas: {{ $manager_producer.replicas }} {{- end }} template: metadata: labels: - {{- include "love-manager-producers.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + app.kubernetes.io/name: {{ include "love-manager-producer.fullname" $ }} spec: containers: - - name: {{ include "love-manager-producers.fullname" . }} - {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystem.imageTag }} - image: "{{ .Values.manager.producers.image.repository }}:{{ $imageTag }}" - imagePullPolicy: {{ .Values.manager.producers.image.pullPolicy }} + - name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + {{- $imageTag := $manager_producer.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "{{ $manager_producer.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ $manager_producer.image.pullPolicy }} ports: - - containerPort: {{ .Values.manager.producers.ports.container }} + - containerPort: {{ $.Values.manager.producers_ports.container }} env: - {{- $data := dict "env" .Values.manager.producers.env "secret" false }} + {{- $data := dict "env" $manager_producer.env "secret" false }} {{- include "helpers.envFromList" $data | indent 10 }} - {{- if .Values.manager.producers.envSecrets }} - {{- $data := dict "secret" true "env" .Values.manager.producers.envSecrets }} + {{- if $manager_producer.envSecrets }} + {{- $data := dict "secret" true "env" $manager_producer.envSecrets }} {{- include "helpers.envFromList" $data | indent 10 }} {{- end }} - {{- with $.Values.manager.producers.resources }} + {{- with $manager_producer.resources }} resources: - {{- toYaml $.Values.manager.producers.resources | nindent 10 }} + {{- toYaml $manager_producer.resources | nindent 10 }} {{- end }} - {{- with $.Values.manager.producers.readinessProbe }} + {{- with $manager_producer.readinessProbe }} readinessProbe: - {{- toYaml $.Values.manager.producers.readinessProbe | nindent 10 }} + {{- toYaml $manager_producer.readinessProbe | nindent 10 }} {{- end }} imagePullSecrets: - name: pull-secret - {{- with $.Values.manager.producers.nodeSelector }} + {{- with $manager_producer.nodeSelector }} nodeSelector: {{- toYaml $ | nindent 8 }} {{- end }} - {{- with $.Values.manager.producers.affinity }} + {{- with $manager_producer.affinity }} affinity: {{- toYaml $ | nindent 8 }} {{- end }} - {{- with $.Values.manager.producers.tolerations }} + {{- with $manager_producer.tolerations }} tolerations: {{- toYaml $ | nindent 8 }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml index a44422835b..238c66f21c 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml @@ -1,47 +1,51 @@ -{{- if .Values.manager.producers.autoscaling.enabled }} +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- +{{- if $manager_producer.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "love-manager-producers.fullname" . }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} labels: - {{- include "love-manager-producers.labels" . 
| nindent 4 }} + {{- include "love-manager-producer.labels" $ | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "love-manager-producers.fullname" . }} - minReplicas: {{ .Values.manager.producers.autoscaling.minReplicas }} - maxReplicas: {{ .Values.manager.producers.autoscaling.maxReplicas }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} + minReplicas: {{ $manager_producer.autoscaling.minReplicas }} + maxReplicas: {{ $manager_producer.autoscaling.maxReplicas }} metrics: - {{- if .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + {{- if $manager_producer.autoscaling.targetCPUUtilizationPercentage }} - type: Resource resource: name: cpu target: type: Utilization - averageUtilization: {{ .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + averageUtilization: {{ $manager_producer.autoscaling.targetCPUUtilizationPercentage }} {{- end }} - {{- if .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + {{- if $manager_producer.autoscaling.targetMemoryUtilizationPercentage }} - type: Resource resource: name: memory target: type: Utilization - averageUtilization: {{ .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + averageUtilization: {{ $manager_producer.autoscaling.targetMemoryUtilizationPercentage }} {{- end }} - {{- if or .Values.manager.producers.autoscaling.scaleUpPolicy .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- if or $manager_producer.autoscaling.scaleUpPolicy $manager_producer.autoscaling.scaleDownPolicy }} behavior: - {{- if .Values.manager.producers.autoscaling.scaleUpPolicy }} + {{- if $manager_producer.autoscaling.scaleUpPolicy }} scaleUp: - {{- with .Values.manager.producers.autoscaling.scaleUpPolicy }} + {{- with $manager_producer.autoscaling.scaleUpPolicy }} {{- toYaml . | nindent 6 }} {{- end }} {{- end }} - {{- if .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- if $manager_producer.autoscaling.scaleDownPolicy }} scaleDown: - {{- with .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- with $manager_producer.autoscaling.scaleDownPolicy }} {{- toYaml . | nindent 6 }} {{- end }} {{- end }} {{- end }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/templates/manager-producers-service.yaml b/applications/love/charts/love-manager/templates/manager-producers-service.yaml index bf90a53f9b..1195507e30 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-service.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-service.yaml @@ -1,10 +1,14 @@ +{{ range $manager_producer:= .Values.manager.producers }} +{{ $_ := set $.Values "manager_producer" $manager_producer }} +--- apiVersion: v1 kind: Service metadata: - name: {{ include "love-manager-producers.fullname" . }}-service - namespace: {{ .Values.namespace }} + name: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }}-service + namespace: {{ $.Values.namespace }} spec: selector: - app.kubernetes.io/instance: {{ include "love-manager-producers.fullname" . 
}} + app.kubernetes.io/instance: {{ include "love-manager-producer.fullname" $ }}-{{ $manager_producer.name }} ports: - - port: {{ .Values.manager.producers.ports.container }} + - port: {{ $.Values.manager.producers_ports.container }} +{{- end }} \ No newline at end of file diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 391b5c51e7..d5534ee77c 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -1,5 +1,21 @@ # -- The overall namespace for the application namespace: love +# -- Configuration for the different manager instances. +# This is divided into two sessions; frontend and producers. +# _frontend_ Configuration for the manager frontend. +# The frontend session defines the configuration for the +# so-called frontend managers. These serves the frontend artifacts +# as well as handles the data piping from the system to the frontend. +# Every time a user opens a view in LOVE the page will connect to the +# frontend manager and will receive the telemetry data from the system. +# Once a connection is established between a frontend and the manager it +# is kept alive. As more connections come in, the autoscaler will scale +# up the number of frontend managers and new connections should be redirected +# to them. The redirect is handled by the manager-frontend-service ClusterIP. +# _producers_ Configurations for the manger producers. +# This is basically a list of managers (with the same structure as the +# frontend, but in a list). These defines services that the LOVE-producers +# connect to, to feed data from the control system. manager: frontend: image: @@ -110,113 +126,123 @@ manager: # -- Configuration for the LOVE manager frontend pods readiness probe readinessProbe: {} producers: - image: - # -- The LOVE manager producers image to use - repository: lsstts/love-manager - # -- The pull policy on the LOVE manager producers image - pullPolicy: IfNotPresent - # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled - nexus3: "" - ports: - # -- The port on the container for normal communications - container: 8000 - # -- The port on the node for normal communcations - node: 30000 - env: - # -- The site tag where LOVE is being run - LOVE_SITE: local - # -- The external URL from the NGINX server for LOVE - SERVER_URL: love.lsst.local - # -- The Kubernetes sub-path for LOVE - URL_SUBPATH: /love - # -- Set the manager to use LFA storage - REMOTE_STORAGE: true - # -- Set the hostname for the Jira instance - JIRA_API_HOSTNAME: rubinobs.atlassian.net - # -- Set the Jira project ID - JIRA_PROJECT_ID: 10063 - # -- Set the URL for the OLE instance - OLE_API_HOSTNAME: site.lsst.local - # -- Set the URI for the 1st LDAP server - AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local - # -- Set the URI for the 2nd LDAP server - AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local - # -- Set the URI for the 3rd LDAP server - AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local - # -- Have the LOVE producer managers not query commander - HEARTBEAT_QUERY_COMMANDER: false - # -- Label for the LOVE commander service. - # Must match the one spcified in the LOVE commander chart - COMMANDER_HOSTNAME: love-commander-service - # -- Port number for the LOVE commander service. 
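
For orientation on the templating change above: the producers Deployment, HorizontalPodAutoscaler, and Service are now rendered inside a `range` over `.Values.manager.producers`, with chart-wide values reached through `$` and each resource name suffixed by the entry's `name`. Assuming the chart's fullname helper resolves to `love-manager-producer` (as the nginx locations later in this change suggest), a producer entry named `general` would render roughly this Service; the Deployment and HPA pick up the same `-general` suffix. This is a sketch of the rendered output, not part of the chart:

```yaml
# Approximate rendering of the per-producer Service for an entry named
# "general", using the chart defaults (namespace "love", container port 8000).
apiVersion: v1
kind: Service
metadata:
  name: love-manager-producer-general-service
  namespace: love
spec:
  selector:
    app.kubernetes.io/instance: love-manager-producer-general
  ports:
    - port: 8000
```
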
- # Must match the one spcified in the LOVE commander chart - COMMANDER_PORT: 5000 - # -- The type of database engine being used for the LOVE manager producers - DB_ENGINE: postgresql - # -- The name of the database being used for the LOVE manager producers - DB_NAME: love - # -- The database user needed for access from the LOVE manager producers - DB_USER: love - # -- The name of the database service - DB_HOST: love-manager-database-service - # -- The port for the database service - DB_PORT: 5432 - # -- The name of the redis service - REDIS_HOST: love-manager-redis-service - # -- The expiration time for the redis service - REDIS_CONFIG_EXPIRY: 5 - # -- The connection capacity for the redis service - REDIS_CONFIG_CAPACITY: 5000 - envSecrets: - # -- The LOVE manager producers secret secret key name - SECRET_KEY: manager-secret-key - # -- The LOVE manager producers process connection password secret key name - PROCESS_CONNECTION_PASS: process-connection-pass - # -- The LOVE manager producers admin user password secret key name - ADMIN_USER_PASS: admin-user-pass - # -- The LOVE manager producers user user password secret key name - USER_USER_PASS: user-user-pass - # -- The LOVE manager producers cmd_user user password secret key name - CMD_USER_PASS: cmd-user-pass - # -- The LOVE manager producers authlist_user password secret key name - AUTHLIST_USER_PASS: authlist-user-pass - # -- The LOVE manager producers LDAP binding password secret key name - AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password - # -- The database password secret key name. - # Must match `database.envSecrets.POSTGRES_PASSWORD` - DB_PASS: db-pass - # -- The redis password secret key name. - # Must match `redis.envSecrets.REDIS_PASS` - REDIS_PASS: redis-pass - # -- The LOVE manager jira API token secret key name - JIRA_API_TOKEN: jira-api-token - # -- Set the default number of LOVE manager producers pod replicas - replicas: 1 - autoscaling: - # -- Whether automatic horizontal scaling is active - enabled: true - # -- The allowed minimum number of replicas - minReplicas: 1 - # -- The allowed maximum number of replicas - maxReplicas: 100 - # -- The percentage of CPU utilization that will trigger the scaling - targetCPUUtilizationPercentage: 80 - # -- (int) The percentage of memory utilization that will trigger the scaling - targetMemoryUtilizationPercentage: "" - # -- Policy for scaling up manager pods - scaleUpPolicy: {} - # -- Policy for scaling down manager pods - scaleDownPolicy: {} - # -- Resource specifications for the LOVE manager producers pods - resources: {} - # -- Node selection rules for the LOVE manager producers pods - nodeSelector: {} - # -- Toleration specifications for the LOVE manager producers pods - tolerations: [] - # -- Affinity rules for the LOVE manager producers pods - affinity: {} - # -- Configuration for the LOVE manager producers pods readiness probe - readinessProbe: {} + # -- Example producer configuration. Each producer should follow the + # same structure as frontend with the added name field. 
+ - name: example-producer + image: + # -- The LOVE manager producers image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager producers image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: rubinobs.atlassian.net + # -- Set the Jira project ID + JIRA_PROJECT_ID: 10063 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- Have the LOVE producer managers not query commander + HEARTBEAT_QUERY_COMMANDER: false + # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager producers + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager producers + DB_NAME: love + # -- The database user needed for access from the LOVE manager producers + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager producers secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager producers process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager producers admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager producers user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager producers cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager producers authlist_user password secret key name + AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager producers LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. 
+ # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- The LOVE manager jira API token secret key name + JIRA_API_TOKEN: jira-api-token + # -- Set the default number of LOVE manager producers pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager producers pods + resources: {} + # -- Node selection rules for the LOVE manager producers pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager producers pods + tolerations: [] + # -- Affinity rules for the LOVE manager producers pods + affinity: {} + # -- Configuration for the LOVE manager producers pods readiness probe + readinessProbe: {} + # -- Configuration for the producers ports. + # this is a single configuration for all the producers. + producers_ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 redis: image: # -- The redis image to use diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md index 7857e17d30..5420c2e03f 100644 --- a/applications/love/charts/love-producer/README.md +++ b/applications/love/charts/love-producer/README.md @@ -8,7 +8,7 @@ Helm chart for the LOVE producers. |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | | annotations | object | `{}` | This allows for the specification of pod annotations. 
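
One practical consequence of turning `manager.producers` into a list: Helm replaces list values wholesale rather than merging them per element, so an environment override cannot rely on the example entry above for defaults and has to restate at least the fields the templates read unconditionally (`name`, `image`, `autoscaling.enabled`, and `replicas` when autoscaling is off). That is why the `values-base.yaml` entries further down repeat most of the block. A hypothetical, deliberately minimal override could look like the following (illustrative values only; a real entry would carry the full `env`/`envSecrets` set shown in the defaults above):

```yaml
# Hypothetical single-producer override in an environment values file.
love-manager:
  manager:
    producers:
      - name: general
        image:
          repository: ts-dockerhub.lsst.org/love-manager
          pullPolicy: Always
        env:
          LOVE_SITE: base
          # ...plus the rest of the env and envSecrets keys from the defaults
        replicas: 1
        autoscaling:
          enabled: false
```
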
| -| env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| env | object | `{}` | This section holds a set of key, value pairs for environmental variables | | envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | | image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | | image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index 77209f1579..5221670b21 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -34,6 +34,8 @@ spec: env: - name: LOVE_CSC_PRODUCER value: {{ $producer.csc | quote }} + - name: WEBSOCKET_HOST + value: {{ $producer.WEBSOCKET_HOST | quote }} - name: LSST_KAFKA_SECURITY_PASSWORD valueFrom: secretKeyRef: diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml index ca39d63d95..49d6de9594 100644 --- a/applications/love/charts/love-producer/values.yaml +++ b/applications/love/charts/love-producer/values.yaml @@ -8,8 +8,7 @@ image: # -- The pull policy on the LOVE producer image pullPolicy: IfNotPresent # -- This section holds a set of key, value pairs for environmental variables -env: - WEBSOCKET_HOST: love-nginx/manager/ws/subscription +env: {} # -- This section holds a set of key, value pairs for secrets envSecrets: PROCESS_CONNECTION_PASS: process-connection-pass diff --git a/applications/love/values-base.yaml b/applications/love/values-base.yaml index 3742727392..090346b111 100644 --- a/applications/love/values-base.yaml +++ b/applications/love/values-base.yaml @@ -23,6 +23,7 @@ love-manager: frontend: image: repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 pullPolicy: Always env: SERVER_URL: base-lsp.lsst.codes @@ -59,19 +60,174 @@ love-manager: initialDelaySeconds: 20 periodSeconds: 10 producers: + - name: general image: repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 pullPolicy: Always env: + LOVE_SITE: base SERVER_URL: base-lsp.lsst.codes OLE_API_HOSTNAME: base-lsp.lsst.codes AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 10 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 
120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: queue + image: + repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 + pullPolicy: Always + env: LOVE_SITE: base + SERVER_URL: base-lsp.lsst.codes + OLE_API_HOSTNAME: base-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 3 autoscaling: - enabled: true + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: m1m3 + image: + repository: ts-dockerhub.lsst.org/love-manager + tag: k0002 + pullPolicy: Always + env: + LOVE_SITE: base + SERVER_URL: base-lsp.lsst.codes + OLE_API_HOSTNAME: base-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.ls.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.ls.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 1 + autoscaling: + enabled: false minReplicas: 2 maxReplicas: 25 targetCPUUtilizationPercentage: 50 @@ -156,7 +312,23 @@ love-nginx: proxy_redirect off; } location /love/manager/producers { - proxy_pass http://love-manager-producers-service:8000; + proxy_pass http://love-manager-producer-general-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/m1m3 { + proxy_pass http://love-manager-producer-m1m3-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + 
proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/queue { + proxy_pass http://love-manager-producer-queue-service:8000; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; @@ -200,8 +372,6 @@ love-producer: image: repository: ts-dockerhub.lsst.org/love-producer pullPolicy: Always - env: - WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription resources: requests: cpu: 10m @@ -212,113 +382,176 @@ love-producer: producers: - name: ataos csc: ATAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atcamera csc: ATCamera:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atdome csc: ATDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atdometrajectory csc: ATDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atheaderservice csc: ATHeaderService:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: athexapod csc: ATHexapod:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atmcs csc: ATMCS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atocps csc: OCPS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atoods csc: ATOODS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atpneumatics csc: ATPneumatics:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atptg csc: ATPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atscheduler csc: Scheduler:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: atscriptqueue csc: ScriptQueue:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: atspectrograph csc: ATSpectrograph:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless201 csc: ESS:201 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless202 csc: ESS:202 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless203 csc: ESS:203 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless204 csc: ESS:204 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: auxteless205 csc: ESS:205 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: calibhilless301 csc: ESS:301 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: camerahexapod csc: MTHexapod:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dimm1 csc: DIMM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dimm2 csc: DIMM:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: dsm1 csc: DSM:1 --log-level 10 + WEBSOCKET_HOST: 
love-nginx-service/love/manager/producers/ws/subscription - name: dsm2 csc: DSM:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: epm1 csc: EPM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: gcheaderservice1 csc: GCHeaderService:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: genericcamera1 csc: GenericCamera:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: lasertracker1 csc: LaserTracker:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: love csc: LOVE:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: m2ess106 csc: ESS:106 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: m2hexapod csc: MTHexapod:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaircompressor1 csc: MTAirCompressor:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaircompressor2 csc: MTAirCompressor:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtaos csc: MTAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdome csc: MTDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess101 csc: ESS:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess102 csc: ESS:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess103 csc: ESS:103 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess107 csc: ESS:107 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdomeess108 csc: ESS:108 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtdometrajectory csc: MTDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtm1m3 csc: MTM1M3:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/m1m3/ws/subscription + resources: + requests: + cpu: 10m + memory: 200Mi + limits: + cpu: 100m + memory: 600Mi - name: mtm2 csc: MTM2:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtmount csc: MTMount:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtptg csc: MTPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtrotator csc: MTRotator:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtscheduler csc: Scheduler:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: mtscriptqueue csc: ScriptQueue:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: ocsscheduler csc: Scheduler:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: ocsscriptqueue csc: ScriptQueue:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription - name: 
tmaess001 csc: ESS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: tmaess104 csc: ESS:104 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: tmaess105 csc: ESS:105 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: watcher csc: Watcher:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription - name: weatherforecast csc: WeatherForecast:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription diff --git a/applications/nightreport/secrets.yaml b/applications/nightreport/secrets-usdfdev.yaml similarity index 53% rename from applications/nightreport/secrets.yaml rename to applications/nightreport/secrets-usdfdev.yaml index 7a1e9e4a72..a748a56695 100644 --- a/applications/nightreport/secrets.yaml +++ b/applications/nightreport/secrets-usdfdev.yaml @@ -1,2 +1,5 @@ nightreport_password: description: "Password for the nightreport database." + copy: + application: exposurelog + key: exposurelog_password diff --git a/applications/nightreport/values-summit.yaml b/applications/nightreport/values-summit.yaml index 4d12a865e7..9fa3095228 100644 --- a/applications/nightreport/values-summit.yaml +++ b/applications/nightreport/values-summit.yaml @@ -3,7 +3,7 @@ image: tag: c0036 pullPolicy: Always config: - site_id: base + site_id: summit db: host: postgresdb01.cp.lsst.org global: diff --git a/applications/nightreport/values-usdfdev.yaml b/applications/nightreport/values-usdfdev.yaml new file mode 100644 index 0000000000..1fab965ee8 --- /dev/null +++ b/applications/nightreport/values-usdfdev.yaml @@ -0,0 +1,9 @@ +image: + repository: ts-dockerhub.lsst.org/nightreport + tag: c0039 + pullPolicy: Always +config: + site_id: usdfdev +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index dbb9d3de95..bcac46aef7 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.12.1" +appVersion: "0.13.0" description: Noteburst is a notebook execution service for the Rubin Science Platform. type: application home: https://noteburst.lsst.io/ diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 5db1416c00..0f195c8d15 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -25,7 +25,7 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | config.worker.imageSelector | string | `"recommended"` | Nublado image stream to select: "recommended", "weekly" or "reference" | | config.worker.jobTimeout | int | `300` | The default notebook execution timeout, in seconds. | | config.worker.keepAlive | string | `"normal"` | Worker keep alive mode: "normal", "fast", "disabled" | -| config.worker.maxConcurrentJobs | int | `3` | Max number of concurrent notebook executions per worker | +| config.worker.maxConcurrentJobs | int | `1` | Max number of concurrent notebook executions per worker | | config.worker.tokenLifetime | string | `"2419200"` | Worker token lifetime, in seconds. | | config.worker.tokenScopes | string | `"exec:notebook,read:image,read:tap,read:alertdb"` | Nublado2 worker account's token scopes as a comma-separated list. 
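
Stepping back from the LOVE `values-base.yaml` changes above: the three new producer managers split the producer WebSocket traffic by path, and the added `love-nginx` locations route each path to the Service generated for the matching producer entry. A summary of the mapping as configured above (paths are relative to the love-nginx server):

```yaml
# Path -> backend Service, as wired in the values-base.yaml changes above.
/love/manager/producers: love-manager-producer-general-service:8000  # most CSC producers
/love/manager/queue: love-manager-producer-queue-service:8000        # atscriptqueue, mtscriptqueue, ocsscriptqueue
/love/manager/m1m3: love-manager-producer-m1m3-service:8000          # mtm1m3
```
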
| | config.worker.workerCount | int | `1` | Number of workers to run | diff --git a/applications/noteburst/values.yaml b/applications/noteburst/values.yaml index d136cbe1f9..0ec9a6f705 100644 --- a/applications/noteburst/values.yaml +++ b/applications/noteburst/values.yaml @@ -123,7 +123,7 @@ config: jobTimeout: 300 # -- Max number of concurrent notebook executions per worker - maxConcurrentJobs: 3 + maxConcurrentJobs: 1 # -- Worker token lifetime, in seconds. tokenLifetime: "2419200" diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 554c445ffd..430008b4fc 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 6.3.0 +appVersion: 7.2.0 dependencies: - name: jupyterhub diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 269181582f..6fb6f5ad9c 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -10,7 +10,7 @@ controller: numWeeklies: 3 numDailies: 2 cycle: null - recommendedTag: "recommended_k0001" + recommendedTag: "recommended_k0002" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" diff --git a/applications/nublado/values-idfdemo.yaml b/applications/nublado/values-idfdemo.yaml index c36c3b5df1..49ea663521 100644 --- a/applications/nublado/values-idfdemo.yaml +++ b/applications/nublado/values-idfdemo.yaml @@ -23,6 +23,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://demo.lsst.cloud/api/butler/configs/idf-repositories.yaml" GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 6432ec3fa2..94760a4c94 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -23,6 +23,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://data-dev.lsst.cloud/api/butler/configs/idf-repositories.yaml" GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 0ce19538b5..74deab6857 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -33,6 +33,7 @@ controller: PANDAMON_URL: "https://usdf-panda-bigmon.slac.stanford.edu:8443/" PANDA_CONFIG_ROOT: "~" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" # 5 days + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 2f64319a2f..85d9196fef 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -18,6 +18,7 @@ controller: DAF_BUTLER_REPOSITORY_INDEX: "https://data.lsst.cloud/api/butler/configs/idf-repositories.yaml" S3_ENDPOINT_URL: "https://storage.googleapis.com" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" # 5 days + TMPDIR: "/tmp" initContainers: - name: "inithome" image: diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 
289eaea4e6..360e229a8f 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -8,8 +8,8 @@ controller: numReleases: 0 numWeeklies: 3 numDailies: 2 - cycle: 38 - recommendedTag: "recommended_c0038" + cycle: 39 + recommendedTag: "recommended_c0039" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" @@ -58,8 +58,13 @@ controller: - name: "lsstcomcam" source: type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/repo/LSSTComCam" + server: "nfs3.cp.lsst.org" + - name: "lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/repo/LSSTCam" + server: "nfs3.cp.lsst.org" - name: "obs-env" source: type: "nfs" @@ -73,23 +78,33 @@ controller: - name: "lsstdata-comcam" source: type: "nfs" - serverPath: "/lsstdata" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/lsstdata" + server: "nfs3.cp.lsst.org" - name: "lsstdata-auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata" server: "nfs-auxtel.cp.lsst.org" + - name: "lsstdata-lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/lsstdata" + server: "nfs3.cp.lsst.org" - name: "lsstdata-base-comcam" source: type: "nfs" - serverPath: "/lsstdata/base/comcam" - server: "comcam-archiver.cp.lsst.org" + serverPath: "/comcam/lsstdata/base/comcam" + server: "nfs3.cp.lsst.org" - name: "lsstdata-base-auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata/base/auxtel" server: "nfs-auxtel.cp.lsst.org" + - name: "lsstdata-base-lsstcam" + source: + type: "nfs" + serverPath: "/lsstcam/lsstdata/base/maintel" + server: "nfs3.cp.lsst.org" volumeMounts: - containerPath: "/home" volumeName: "home" @@ -101,6 +116,8 @@ controller: volumeName: "latiss" - containerPath: "/repo/LSSTComCam" volumeName: "lsstcomcam" + - containerPath: "/repo/LSSTCam" + volumeName: "lsstcam" - containerPath: "/net/obs-env" volumeName: "obs-env" - containerPath: "/readonly/lsstdata/other" @@ -109,10 +126,14 @@ controller: volumeName: "lsstdata-comcam" - containerPath: "/readonly/lsstdata/auxtel" volumeName: "lsstdata-auxtel" + - containerPath: "/readonly/lsstdata/lsstcam" + volumeName: "lsstdata-lsstcam" - containerPath: "/data/lsstdata/base/comcam" volumeName: "lsstdata-base-comcam" - containerPath: "/data/lsstdata/base/auxtel" volumeName: "lsstdata-base-auxtel" + - containerPath: "/data/lsstdata/base/maintel" + volumeName: "lsstdata-base-lsstcam" hub: internalDatabase: false diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index abe987c409..bafd20a7a2 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -8,8 +8,8 @@ controller: numReleases: 0 numWeeklies: 3 numDailies: 2 - cycle: 38 - recommendedTag: "recommended_c0038" + cycle: 39 + recommendedTag: "recommended_c0039" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" diff --git a/applications/obssys/values-base.yaml b/applications/obssys/values-base.yaml index 399aab63ef..c221197c1b 100644 --- a/applications/obssys/values-base.yaml +++ b/applications/obssys/values-base.yaml @@ -7,6 +7,7 @@ atqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 2 --state enabled USER_USERNAME: user + IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: &bS-cP /home/saluser/.lsst dbUser: &bS-dbU oods @@ -84,6 +85,7 @@ mtqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 1 --state enabled USER_USERNAME: user + 
IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: *bS-cP dbUser: *bS-dbU @@ -161,6 +163,7 @@ ocsqueue: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml RUN_ARG: 3 --state enabled USER_USERNAME: user + IMAGE_SERVER_URL: http://lsstcam-mcm.ls.lsst.org butlerSecret: containerPath: *bS-cP dbUser: *bS-dbU diff --git a/applications/onepassword-connect/Chart.yaml b/applications/onepassword-connect/Chart.yaml index ea91cd2183..7cb6ff21d9 100644 --- a/applications/onepassword-connect/Chart.yaml +++ b/applications/onepassword-connect/Chart.yaml @@ -6,7 +6,7 @@ version: 1.0.0 dependencies: - name: connect - version: 1.15.1 + version: 1.16.0 repository: https://1password.github.io/connect-helm-charts/ annotations: diff --git a/applications/postgres/values-usdf-cm-dev.yaml b/applications/postgres/values-usdf-cm-dev.yaml new file mode 100644 index 0000000000..79960946d4 --- /dev/null +++ b/applications/postgres/values-usdf-cm-dev.yaml @@ -0,0 +1,5 @@ +gafaelfawr_db: + user: 'gafaelfawr' + db: 'gafaelfawr' + +postgresStorageClass: 'wekafs--sdf-k8s01' diff --git a/applications/ppdb-replication/.helmignore b/applications/ppdb-replication/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/ppdb-replication/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/ppdb-replication/Chart.yaml b/applications/ppdb-replication/Chart.yaml new file mode 100644 index 0000000000..1dd8dce332 --- /dev/null +++ b/applications/ppdb-replication/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Replicates data from the APDB to the PPDB +name: ppdb-replication +sources: +- https://github.com/lsst/dax_ppdb.git +type: application +version: 1.0.0 diff --git a/applications/ppdb-replication/README.md b/applications/ppdb-replication/README.md new file mode 100644 index 0000000000..5598cba6d9 --- /dev/null +++ b/applications/ppdb-replication/README.md @@ -0,0 +1,44 @@ +# ppdb-replication + +Replicates data from the APDB to the PPDB + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the ppdb-replication deployment pod | +| config.additionalS3ProfileName | string | `nil` | Additional S3 profile name | +| config.additionalS3ProfileUrl | string | `nil` | Additional S3 profile URL | +| config.apdbConfig | string | `nil` | APDB config file resource | +| config.apdbIndexUri | string | `nil` | APDB index URI | +| config.checkInterval | string | `nil` | Time to wait before checking for new chunks, if no chunk appears | +| config.disableBucketValidation | int | `1` | Disable bucket validation in LSST S3 tools | +| config.logLevel | string | `"INFO"` | Logging level | +| config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | +| config.maxWaitTime | string | `nil` | Maximum time to wait before replicating a chunk after next chunk appears | +| config.minWaitTime | string | `nil` | Minimum time to wait before replicating a chunk after next chunk appears | +| config.monLogger | string | 
`"lsst.dax.ppdb.monitor"` | Name of logger for monitoring | +| config.monRules | string | `nil` | Comma-separated list of monitoring filter rules | +| config.pathPrefix | string | `"/ppdb-replication"` | URL path prefix | +| config.persistentVolumeClaims | list | `[]` | Persistent volume claims | +| config.ppdbConfig | string | `nil` | PPDB config file resource | +| config.s3EndpointUrl | string | `nil` | S3 endpoint URL | +| config.updateExisting | bool | `false` | Allow updates to already replicated data | +| config.volumeMounts | list | `[]` | Volume mounts | +| config.volumes | list | `[]` | Volumes specific to the environment | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"Always"` | Pull policy for the ppdb-replication image | +| image.repository | string | `"ghcr.io/lsst/ppdb-replication"` | Image to use in the ppdb-replication deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the ppdb-replication deployment pod | +| podAnnotations | object | `{}` | Annotations for the ppdb-replication deployment pod | +| replicaCount | int | `1` | Number of deployment pods to start | +| resources | object | see `values.yaml` | Resource limits and requests for the ppdb-replication deployment pod | +| tolerations | list | `[]` | Tolerations for the ppdb-replication deployment pod | diff --git a/applications/ppdb-replication/secrets.yaml b/applications/ppdb-replication/secrets.yaml new file mode 100644 index 0000000000..92474ab2c3 --- /dev/null +++ b/applications/ppdb-replication/secrets.yaml @@ -0,0 +1,9 @@ +"aws-credentials.ini": + description: >- + AWS credentials required for acessing configuration files in S3. +"db-auth.yaml": + description: >- + Cassandra database credentials for the APDB. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the PPDB database. diff --git a/applications/ppdb-replication/templates/_helpers.tpl b/applications/ppdb-replication/templates/_helpers.tpl new file mode 100644 index 0000000000..47bdc59cfe --- /dev/null +++ b/applications/ppdb-replication/templates/_helpers.tpl @@ -0,0 +1,44 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ppdb-replication.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ppdb-replication.labels" -}} +helm.sh/chart: {{ include "ppdb-replication.chart" . }} +{{ include "ppdb-replication.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ppdb-replication.selectorLabels" -}} +app.kubernetes.io/name: "ppdb-replication" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "ppdb-replication.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/ppdb-replication/templates/configmap.yaml b/applications/ppdb-replication/templates/configmap.yaml new file mode 100644 index 0000000000..a66bacce2c --- /dev/null +++ b/applications/ppdb-replication/templates/configmap.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +data: + DAX_APDB_INDEX_URI: {{ .Values.config.apdbIndexUri | quote }} + PPDB_REPLICATION_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + PPDB_REPLICATION_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} + PPDB_REPLICATION_PROFILE: {{ .Values.config.logProfile | quote }} + PPDB_REPLICATION_APDB_CONFIG: {{ .Values.config.apdbConfig | quote }} + PPDB_REPLICATION_PPDB_CONFIG: {{ .Values.config.ppdbConfig | quote }} + PPDB_REPLICATION_MON_LOGGER: {{ .Values.config.monLogger | quote }} + PPDB_REPLICATION_MON_RULES: {{ .Values.config.monRules | quote }} + PPDB_REPLICATION_UPDATE_EXISTING: {{ .Values.config.updateExisting | quote}} + PPDB_REPLICATION_MIN_WAIT_TIME: {{ .Values.config.minWaitTime | quote }} + PPDB_REPLICATION_MAX_WAIT_TIME: {{ .Values.config.maxWaitTime | quote }} + PPDB_REPLICATION_CHECK_INTERVAL: {{ .Values.config.checkInterval | quote}} diff --git a/applications/ppdb-replication/templates/deployment.yaml b/applications/ppdb-replication/templates/deployment.yaml new file mode 100644 index 0000000000..454ec56b56 --- /dev/null +++ b/applications/ppdb-replication/templates/deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ppdb-replication.fullname" . }} + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ppdb-replication.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "ppdb-replication.selectorLabels" . | nindent 8 }} + annotations: + # Force the pod to restart when the config maps are updated. + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + volumes: + - name: "ppdb-replication-secrets-raw" + secret: + secretName: {{ include "ppdb-replication.fullname" . }} + - name: "ppdb-replication-secrets" + emptyDir: + sizeLimit: "100Mi" + {{- with .Values.config.volumes }} + {{- . 
| toYaml | nindent 8 }} + {{- end }} + initContainers: + - name: fix-secret-permissions + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/sh" + - "-c" + - | + cp -RL /tmp/ppdb-replication-secrets-raw/* /app/secrets/ + chmod 0400 /app/secrets/* + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: "ppdb-replication-secrets" + mountPath: "/app/secrets" + - name: "ppdb-replication-secrets-raw" + mountPath: "/tmp/ppdb-replication-secrets-raw" + readOnly: true + containers: + - name: {{ .Chart.Name }} + envFrom: + - configMapRef: + name: "ppdb-replication" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/app/secrets/aws-credentials.ini" + - name: PGPASSFILE + value: "/app/secrets/postgres-credentials.txt" + - name: LSST_DB_AUTH + value: "/app/secrets/db-auth.yaml" + - name: S3_ENDPOINT_URL + value: {{ .Values.config.s3EndpointUrl | quote }} + - name: LSST_RESOURCES_S3_PROFILE_{{ .Values.config.additionalS3ProfileName }} + value: {{ .Values.config.additionalS3ProfileUrl | quote }} + - name: LSST_DISABLE_BUCKET_VALIDATION + value: {{ .Values.config.disableBucketValidation | quote }} + volumeMounts: + - name: "ppdb-replication-secrets" + mountPath: "/app/secrets" + readOnly: true + {{- with .Values.config.volumeMounts }} + {{- . | toYaml | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/ppdb-replication/templates/ingress.yaml b/applications/ppdb-replication/templates/ingress.yaml new file mode 100644 index 0000000000..381bce084c --- /dev/null +++ b/applications/ppdb-replication/templates/ingress.yaml @@ -0,0 +1,30 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" +template: + metadata: + name: "ppdb-replication" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.config.pathPrefix | quote }} + pathType: "Prefix" + backend: + service: + name: "ppdb-replication" + port: + number: 8080 diff --git a/applications/ppdb-replication/templates/networkpolicy.yaml b/applications/ppdb-replication/templates/networkpolicy.yaml new file mode 100644 index 0000000000..10ddf62820 --- /dev/null +++ b/applications/ppdb-replication/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "ppdb-replication" +spec: + podSelector: + matchLabels: + {{- include "ppdb-replication.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. 
+ - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/ppdb-replication/templates/pvc.yaml b/applications/ppdb-replication/templates/pvc.yaml new file mode 100644 index 0000000000..52af2db47b --- /dev/null +++ b/applications/ppdb-replication/templates/pvc.yaml @@ -0,0 +1,18 @@ +{{- if .Values.config.persistentVolumeClaims }} +{{- $top := . -}} +{{- range $index, $pvc := .Values.config.persistentVolumeClaims }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ $pvc.name }}" +spec: + storageClassName: "{{ $pvc.storageClassName }}" + accessModes: + - ReadOnlyMany + resources: + requests: + storage: 100Mi +{{- end }} +{{- end }} + diff --git a/applications/ppdb-replication/templates/service.yaml b/applications/ppdb-replication/templates/service.yaml new file mode 100644 index 0000000000..27b726bc7b --- /dev/null +++ b/applications/ppdb-replication/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "ppdb-replication" + labels: + {{- include "ppdb-replication.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "ppdb-replication.selectorLabels" . | nindent 4 }} diff --git a/applications/ppdb-replication/templates/vault-secrets.yaml b/applications/ppdb-replication/templates/vault-secrets.yaml new file mode 100644 index 0000000000..96c228968f --- /dev/null +++ b/applications/ppdb-replication/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "ppdb-replication.fullname" . }} + labels: + {{- include "ppdb-replication.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/ppdb-replication" + type: Opaque diff --git a/applications/ppdb-replication/values-usdfdev.yaml b/applications/ppdb-replication/values-usdfdev.yaml new file mode 100644 index 0000000000..b373b91d38 --- /dev/null +++ b/applications/ppdb-replication/values-usdfdev.yaml @@ -0,0 +1,44 @@ +config: + + # -- Logging level + logLevel: "INFO" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "development" + + # -- APDB config file resource + apdbConfig: "label:pp-prod:lsstcomcamsim-or4" + + # -- PPDB config file resource + ppdbConfig: "/sdf/group/rubin/user/jeremym/ppdb-replication/config/ppdb-replication-test-1.yaml" + + # -- APDB index URI + apdbIndexUri: "/sdf/group/rubin/shared/apdb_config/apdb-index.yaml" + + # -- S3 endpoint URL + s3EndpointUrl: https://s3dfrgw.slac.stanford.edu + + # -- S3 profile name for additional S3 profile + additionalS3ProfileName: "embargo" + + # -- S3 profile URL for additional S3 profile + additionalS3ProfileUrl: "https://sdfembs3.sdf.slac.stanford.edu" + + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volumeMounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin diff --git a/applications/ppdb-replication/values.yaml b/applications/ppdb-replication/values.yaml new file mode 100644 index 0000000000..fec71e1776 --- /dev/null +++ b/applications/ppdb-replication/values.yaml @@ -0,0 +1,118 @@ +# Default values for ppdb-replication. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the ppdb-replication deployment + repository: "ghcr.io/lsst/ppdb-replication" + + # -- Pull policy for the ppdb-replication image + pullPolicy: "Always" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: "main" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the ppdb-replication deployment pod +affinity: {} + +# -- Node selection rules for the ppdb-replication deployment pod +nodeSelector: {} + +# -- Annotations for the ppdb-replication deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the ppdb-replication deployment pod +# @default -- see `values.yaml` +resources: + limits: + cpu: "1" + memory: "16.0Gi" + requests: + cpu: "200m" # 20% of a single core + memory: "4.0Gi" + +# -- Tolerations for the ppdb-replication deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
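
> The additional S3 profile settings in `values-usdfdev.yaml` above are consumed by the deployment template earlier in this change, which composes an `LSST_RESOURCES_S3_PROFILE_<name>` environment variable from `config.additionalS3ProfileName` and `config.additionalS3ProfileUrl`. For usdfdev, the resulting container environment would look roughly like the sketch below; this is derived by hand from the template and values shown in this diff, not from a rendered manifest.

```yaml
# Approximate rendered environment for usdfdev (sketch only)
env:
  - name: AWS_SHARED_CREDENTIALS_FILE
    value: "/app/secrets/aws-credentials.ini"      # hard-coded in the deployment template
  - name: S3_ENDPOINT_URL
    value: "https://s3dfrgw.slac.stanford.edu"     # config.s3EndpointUrl (usdfdev)
  - name: LSST_RESOURCES_S3_PROFILE_embargo        # suffix from config.additionalS3ProfileName
    value: "https://sdfembs3.sdf.slac.stanford.edu"
  - name: LSST_DISABLE_BUCKET_VALIDATION
    value: "1"                                     # chart default, quoted by the template
```
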
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null + +# Application-specific configuration +config: + # -- Logging level + logLevel: "INFO" + + # -- Name of logger for monitoring + monLogger: "lsst.dax.ppdb.monitor" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "production" + + # -- URL path prefix + pathPrefix: "/ppdb-replication" + + # -- APDB config file resource + apdbConfig: null + + # -- PPDB config file resource + ppdbConfig: null + + # -- APDB index URI + apdbIndexUri: null + + # -- Comma-separated list of monitoring filter rules + monRules: null + + # -- Allow updates to already replicated data + updateExisting: false + + # -- Minimum time to wait before replicating a chunk after next chunk appears + minWaitTime: null + + # -- Maximum time to wait before replicating a chunk after next chunk appears + maxWaitTime: null + + # -- Time to wait before checking for new chunks, if no chunk appears + checkInterval: null + + # -- S3 endpoint URL + s3EndpointUrl: null + + # -- Additional S3 profile name + additionalS3ProfileName: null + + # -- Additional S3 profile URL + additionalS3ProfileUrl: null + + # -- Disable bucket validation in LSST S3 tools + disableBucketValidation: 1 + + # -- Volumes specific to the environment + volumes: [] + + # -- Volume mounts + volumeMounts: [] + + # -- Persistent volume claims + persistentVolumeClaims: [] diff --git a/applications/prompt-proto-service-hsc-gpu/README.md b/applications/prompt-proto-service-hsc-gpu/README.md index e244bf3f12..2415159676 100644 --- a/applications/prompt-proto-service-hsc-gpu/README.md +++ b/applications/prompt-proto-service-hsc-gpu/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. 
| +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `true` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `1` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `1` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). 
| +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml index 6ecda4e3cb..7e9e4e559b 100644 --- a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml @@ -13,7 +13,7 @@ prompt-proto-service: instrument: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: @@ -27,6 +27,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/sql/pp_apdb_hsc-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-hsc-gpu/values.yaml b/applications/prompt-proto-service-hsc-gpu/values.yaml index 46c7db1e09..7efc93a3bb 100644 --- a/applications/prompt-proto-service-hsc-gpu/values.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. 
+ raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: true - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 1 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index 3c10244ada..fbb60fceae 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. 
| | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). 
This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index 987d3b4e9f..aba3ca2b2c 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: instrument: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: @@ -28,6 +28,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/sql/pp_apdb_hsc-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 931f3525b9..c3921fcb42 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. 
+ raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 605ee8a88e..941c350a20 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `6` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). 
This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index c9f5d5677e..9e0c60bf5d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: main: >- (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: @@ -28,6 +28,9 @@ prompt-proto-service: apdb: config: s3://rubin-pp-dev-users/apdb_config/cassandra/pp_apdb_latiss-dev.py + alerts: + topic: "alert-stream-test" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy auth_env: false diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index dafffeab21..ec4b9cc3f7 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -14,19 +14,19 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: 4.3.0 + tag: 4.5.1 instrument: pipelines: + # BLOCK-306 is photographic imaging + # BLOCK-T17 is daytime checkout + # BLOCK-271 is photon transfer curve calibrations # BLOCK-295 is the daily calibration sequence as of May 27, 2024 main: >- - (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, + (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - (survey="BLOCK-T17")=[] + (survey="BLOCK-T17")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr-cal.yaml] (survey="cwfs")=[] (survey="cwfs-focus-sweep")=[] (survey="spec-survey")=[] @@ -34,8 +34,7 @@ prompt-proto-service: (survey="BLOCK-295")=[] (survey="")=[] preprocessing: >- - (survey="AUXTEL_PHOTO_IMAGING")=[] - (survey="AUXTEL_DRP_IMAGING")=[] + (survey="BLOCK-306")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] (survey="BLOCK-T17")=[] (survey="cwfs")=[] (survey="cwfs-focus-sweep")=[] @@ -49,6 +48,8 @@ prompt-proto-service: imageBucket: rubin-summit endpointUrl: https://s3dfrgw.slac.stanford.edu + raw_microservice: http://172.24.5.144:8080/presence + imageNotifications: kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing-prod @@ -60,6 +61,9 @@ prompt-proto-service: apdb: config: s3://rubin-summit-users/apdb_config/cassandra/pp_apdb_latiss.py + alerts: + topic: "latiss-alerts" + sasquatch: endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy namespace: lsst.prompt.prod diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 410e4e5225..38fddacd35 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ 
b/applications/prompt-proto-service-latiss/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: '0' + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index abdafa6f39..b2d000f026 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). 
This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 32e0705ba0..818307f6ca 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -22,4 +22,7 @@ prompt-proto-service: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing + alerts: + topic: "alert-stream-test" + fullnameOverride: "prompt-proto-service-lsstcam" diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 6304b41272..a590661413 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. 
additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 13cf5a2017..9e9b55654b 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `"alert-stream-test"` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). 
| -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index b6b4ce83dc..45667dadc3 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -22,4 +22,7 @@ prompt-proto-service: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing + alerts: + topic: "alert-stream-test" + fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index c4f253abe0..7682298e07 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. + raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,7 +99,8 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - topic: "alert-stream-test" + # @default -- None, must be set + topic: "" registry: # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. 
additionalVolumeMounts: [] diff --git a/applications/prompt-proto-service-lsstcomcamsim/README.md b/applications/prompt-proto-service-lsstcomcamsim/README.md index ad995209a9..6854bea8e2 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/README.md +++ b/applications/prompt-proto-service-lsstcomcamsim/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | `""` | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | | prompt-proto-service.cache.patchesPerImage | int | `16` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `6` | A factor by which to multiply `baseSize` for refcat datasets. | +| prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -32,19 +33,20 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | prompt-proto-service.instrument.skymap | string | `"ops_rehersal_prep_2k_v1"` | Skymap to use with the instrument | -| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores. | -| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested. | -| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). 
| -| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | -| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request. | +| prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
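The `instrument.pipelines` values changed below use the mapping notation documented in the table above (described there as complex and still in flux). As a sketch of its shape only: each `(survey="...")` clause maps a survey or block name to an ordered list of pipeline files, and the existing values files use `(survey="")=[]` as a catch-all with no pipelines. Survey names and pipeline choices here are per-environment placeholders:

```yaml
prompt-proto-service:
  instrument:
    pipelines:
      # Sketch of the notation only; see the linked source code for authoritative semantics.
      main: >-
        (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/ApPipe.yaml,
        ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml,
        ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml]
        (survey="")=[]
      preprocessing: >-
        (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml]
        (survey="")=[]
```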
| diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml index df319a9054..86f51c8ce7 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml @@ -15,7 +15,7 @@ prompt-proto-service: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] - preprocessing: (survey="SURVEY")=[] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml] calibRepo: s3://rubin-pp-dev-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml deleted file mode 100644 index 9f0fdf75cb..0000000000 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfprod-prompt-processing.yaml +++ /dev/null @@ -1,56 +0,0 @@ -prompt-proto-service: - - podAnnotations: - # Expect to need roughly n_detector × request_latency / survey_cadence pods - # For a 30 s ComCam survey with 500 s latency, this is 150 - autoscaling.knative.dev/max-scale: "150" - autoscaling.knative.dev/target-utilization-percentage: "100" - # Update this field if using latest or static image tag in dev - revision: "1" - - worker: - # Embargo rack allows fast cleanup. - grace_period: 20 - - image: - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: 4.2.0 - - instrument: - pipelines: - main: >- - (survey="BLOCK-297")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/ApPipe.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] - (survey="")=[] - preprocessing: >- - (survey="BLOCK-297")=[] - (survey="")=[] - calibRepo: s3://rubin-summit-users - - s3: - imageBucket: rubin-summit - endpointUrl: https://sdfembs3.sdf.slac.stanford.edu - - imageNotifications: - kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 - topic: rubin-summit-notification - - apdb: - config: s3://rubin-summit-users/apdb_config/cassandra/pp_apdb_lsstcomcamsim_or4.py - - alerts: - topic: alerts-simulated - - sasquatch: - endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy - namespace: lsst.prompt.prod - auth_env: false - - logLevel: timer.lsst.activator=DEBUG lsst.diaPipe=VERBOSE lsst.rbClassify=VERBOSE - - knative: - memoryLimit: "16Gi" - - fullnameOverride: "prompt-proto-service-lsstcomcamsim" diff --git a/applications/prompt-proto-service-lsstcomcamsim/values.yaml b/applications/prompt-proto-service-lsstcomcamsim/values.yaml index 1cab015e99..99f8eea75b 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values.yaml @@ -73,6 +73,10 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 + # -- The URI to a microservice that maps image metadata to a file location. + # If empty, Prompt Processing does not use a microservice. 
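The `raw_microservice` option introduced just below is left empty in the chart defaults; an environment that runs such a service would point this at the service's HTTP endpoint in its own values file. The URL here is a placeholder only:

```yaml
prompt-proto-service:
  # Hypothetical endpoint; the real value is environment-specific and not part of this change.
  raw_microservice: "http://raw-location-service.example.internal:8080"
```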
+ raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -95,6 +99,7 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent + # @default -- None, must be set topic: "" registry: @@ -117,21 +122,23 @@ prompt-proto-service: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). @@ -145,6 +152,9 @@ prompt-proto-service: # If 0, idle timeout is ignored. responseStartTimeout: 0 + # -- The number of Knative requests that can be handled simultaneously by one container + containerConcurrency: 1 + # -- Kubernetes YAML configs for extra container volume(s). # Any volumes required by other config options are automatically handled by the Helm chart. additionalVolumeMounts: [] diff --git a/applications/rapid-analysis/Chart.yaml b/applications/rapid-analysis/Chart.yaml new file mode 100644 index 0000000000..c4a7da146e --- /dev/null +++ b/applications/rapid-analysis/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: rapid-analysis +version: 1.0.0 +description: A Helm chart for deploying the Rapid Analysis services. diff --git a/applications/rapid-analysis/README.md b/applications/rapid-analysis/README.md new file mode 100644 index 0000000000..089df8b114 --- /dev/null +++ b/applications/rapid-analysis/README.md @@ -0,0 +1,50 @@ +# rapid-analysis + +A Helm chart for deploying the Rapid Analysis services. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | This specifies the scheduling constraints of the pod. | +| butlerSecret | object | `{}` | This section allows for specification of Butler secret information. If this section is used, it must contain the following attributes: _key_ (The vault key for the Butler secret), _containerPath_ (The directory location for the Butler secret), _dbUser_ (The username for the Butler backend database) | +| credentialFile | string | `""` | The name of the expected credential file for the broadcasters | +| credentialSecretsPath | string | `""` | The key for the credentials including any sub-paths. | +| env | object | `{}` | This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). NOTE: RUN_ARG is taken care of by the chart using _script_. 
| +| envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| fullnameOverride | string | `""` | Specify the deployed application name specifically. Overrides all other names. | +| gather2aSet | object | `{}` | This configures a StatefulSet used for visit-level gather processing. | +| gatherRollupSet | object | `{}` | This configures a StatefulSet used for night-summary rollup. | +| image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment. | +| image.repository | string | `"ts-dockerhub.lsst.org/rubintv-broadcaster"` | The Docker registry name for the container image. | +| image.tag | string | `"develop"` | The tag of the container image to use. | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| location | string | `""` | Provide the location where the system is running. | +| nameOverride | string | `""` | Adds an extra string to the release name. | +| namespace | string | `"rapid-analysis"` | This is the namespace where the applications will be deployed. | +| nfsMountpoint | list | `[]` | This section holds the information necessary to create a NFS mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _containerPath_ (The path inside the container to mount), _readOnly_ (This sets if the NFS mount is read only or read/write), _server_ (The hostname of the NFS server), _serverPath_ (The path exported by the NFS server) | +| nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod. | +| podAnnotations | object | `{}` | This allows the specification of pod annotations. | +| pullSecretsPath | string | `""` | | +| pvcMountpoint | list | `[]` | This section holds information about existing volume claims. If the section is used, each object listed can have the following attributes defined: _name_ (The name ot the persistent volume), _containerPath_ (The path inside the container to mount), _subPath_ (persistent volume subpath, optional) | +| pvcMountpointClaim | list | `[]` | This section holds the information necessary to claim persistent volumes. If the section is used, each object listed can have the following attributes defined: _name_ (The name ot the persistent volume), _containerPath_ (The path inside the container to mount), _subPath_ (persistent volume subpath, optional) | +| redis.affinity | object | `{}` | Affinity rules for the redis pods | +| redis.enabled | bool | `false` | This specifies whether to use redis or not. | +| redis.env | object | `{}` | This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). NOTE: RUN_ARG is taken care of by the chart using _script_. | +| redis.envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. 
Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| redis.image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment. | +| redis.image.repository | string | `"docker.io/redis"` | The Docker registry name for the redis container image. | +| redis.image.tag | string | `"latest"` | The tag of the redis container image to use. | +| redis.nodeSelector | object | `{}` | Node selection rules for the redis pods | +| redis.resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the redis container. | +| redis.storage.classname | string | `nil` | | +| redis.storage.request | string | `"1Gi"` | The size of the storage request. | +| redis.tolerations | list | `[]` | Toleration specifications for the redis pods | +| resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the container. | +| rubinTvSecretsPath | string | `""` | | +| scripts | object | `{}` | List of script objects to run for the broadcaster. This section MUST have the following attribute specified for each entry. _name_ (The full path for the script) The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| securityContext | object | `{}` | This section allows for specification of security context information. If the section is used, at least one of the following attributes must be specified. _uid_ (User id to run application as), _gid_ (Group id of the user that runs the application), _fid_ (File system context user id), | +| siteTag | string | `""` | A special tag for letting the scripts know where they are running. | +| tolerations | list | `[]` | This specifies the tolerations of the pod for any system taints. | +| vaultPrefixPath | string | `""` | The Vault prefix path | +| workerSet | object | `{}` | This configures a StatefulSet used for single frame workers. | diff --git a/applications/rapid-analysis/secrets.yaml b/applications/rapid-analysis/secrets.yaml new file mode 100644 index 0000000000..eda73c3be5 --- /dev/null +++ b/applications/rapid-analysis/secrets.yaml @@ -0,0 +1,8 @@ +redis-password: + description: >- + Password used to authenticate rubintv worker pods to their shared + redis pod. If this secret changes, both the Redis server and all + worker pods will require a restart. + generate: + type: + password diff --git a/applications/rapid-analysis/templates/_helpers.tpl b/applications/rapid-analysis/templates/_helpers.tpl new file mode 100644 index 0000000000..fe0a7eaf8e --- /dev/null +++ b/applications/rapid-analysis/templates/_helpers.tpl @@ -0,0 +1,124 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "rapid-analysis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "rapid-analysis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "rapid-analysis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "rapid-analysis.labels" -}} +helm.sh/chart: {{ include "rapid-analysis.chart" . }} +{{ include "rapid-analysis.selectorLabels" . }} +{{- end }} + +{{/* +Script name +*/}} +{{- define "rapid-analysis.scriptName" -}} +{{- regexSplit "/" .Values.script.name -1 | last | trimSuffix ".py" | kebabcase }} +{{- end }} + +{{/* +Deployment name +*/}} +{{- define "rapid-analysis.deploymentName" -}} +{{- $name := regexSplit "/" .Values.script.name -1 | last | trimSuffix ".py" | kebabcase }} +{{- $cameraName := regexSplit "/" .Values.script.name -1 | rest | first | lower }} +{{- $camera := "" }} +{{- if eq $cameraName "auxtel" }} +{{- $camera = "at"}} +{{- else if eq $cameraName "comcam" }} +{{- $camera = "cc"}} +{{- else }} +{{- $camera = $cameraName}} +{{- end }} +{{- printf "s-%s-%s" $camera $name }} +{{- end }} + + +{{/* +Selector labels +*/}} +{{- define "rapid-analysis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "rapid-analysis.deploymentName" . }} +app.kubernetes.io/instance: {{ include "rapid-analysis.name" . }} +{{- $values := regexSplit "/" .Values.script.name -1 }} +{{- if eq 1 (len $values) }} +all: misc +{{- else }} +{{- $all_label := lower (index $values 1) }} +{{- $script := index $values 2 }} +{{- if contains "Isr" $script }} +isr: {{ $all_label }} +{{- end }} +all: {{ $all_label }} +{{- if has $all_label (list "auxtel" "comcam" "bot" "ts8") }} +camera: {{ $all_label }} +{{- else }} +{{- if contains "StarTracker" $script }} +camera: startracker +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for redis. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rapid-analysis.redis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-redis" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-redis" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels - redis +*/}} +{{- define "rapid-analysis.redis.labels" -}} +helm.sh/chart: {{ include "rapid-analysis.chart" . }} +{{ include "rapid-analysis.redis.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels - redis +*/}} +{{- define "rapid-analysis.redis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +app.kubernetes.io/instance: {{ include "rapid-analysis.redis.fullname" . 
}} +{{- end }} diff --git a/applications/rapid-analysis/templates/configmap.yaml b/applications/rapid-analysis/templates/configmap.yaml new file mode 100644 index 0000000000..65aa6db601 --- /dev/null +++ b/applications/rapid-analysis/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: env-configmap + namespace: {{ .Values.namespace }} +data: + GOOGLE_APPLICATION_CREDENTIALS: "/etc/rubintv/creds/{{ .Values.credentialFile }}" diff --git a/applications/rapid-analysis/templates/deployment.yaml b/applications/rapid-analysis/templates/deployment.yaml new file mode 100644 index 0000000000..d6a44033ca --- /dev/null +++ b/applications/rapid-analysis/templates/deployment.yaml @@ -0,0 +1,224 @@ +{{ range $script := .Values.scripts }} +{{ $_ := set $.Values "script" $script }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }} + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} {{ $.Values.siteTag }} + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- 
end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws_credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations 
}} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/gather-rollup-set.yaml b/applications/rapid-analysis/templates/gather-rollup-set.yaml new file mode 100644 index 0000000000..ac8958cddf --- /dev/null +++ b/applications/rapid-analysis/templates/gather-rollup-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.gatherRollupSet }} +{{ $script := $.Values.gatherRollupSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gatherrollupset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + 
mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git 
a/applications/rapid-analysis/templates/gather2a-set.yaml b/applications/rapid-analysis/templates/gather2a-set.yaml new file mode 100644 index 0000000000..2c1fdbee4f --- /dev/null +++ b/applications/rapid-analysis/templates/gather2a-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.gather2aSet }} +{{ $script := $.Values.gather2aSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gather2aset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end 
}} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/mountpoint-pvc.yaml b/applications/rapid-analysis/templates/mountpoint-pvc.yaml new file mode 100644 index 
0000000000..4cf1a55df3 --- /dev/null +++ b/applications/rapid-analysis/templates/mountpoint-pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.pvcMountpointClaim }} +{{- range $values := .Values.pvcMountpointClaim }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ $values.name }} + namespace: {{ $.Values.namespace }} + {{- if $values.ids }} + annotations: + {{- if $values.ids.uid }} + pv.beta.kubernetes.io/uid: "{{ $values.ids.uid }}" + {{- end }} + {{- if $values.ids.gid }} + pv.beta.kubernetes.io/gid: "{{ $values.ids.gid }}" + {{- end }} + {{- end }} +spec: + accessModes: + - {{ $values.accessMode | quote }} + resources: + requests: + storage: {{ $values.claimSize }} + storageClassName: {{ $values.name }} +{{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/redis-service.yaml b/applications/rapid-analysis/templates/redis-service.yaml new file mode 100644 index 0000000000..0ac2c01ced --- /dev/null +++ b/applications/rapid-analysis/templates/redis-service.yaml @@ -0,0 +1,21 @@ +{{- if .Values.redis.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: redis-service + namespace: {{ .Values.namespace }} + {{- with $.Values.redis.serviceAnnotations }} + annotations: + {{- toYaml $.Values.redis.serviceAnnotations | nindent 4 }} + {{- end }} +spec: + type: LoadBalancer + internalTrafficPolicy: Cluster + selector: + app.kubernetes.io/instance: {{ include "rapid-analysis.redis.fullname" . }} + ports: + - name: redis + protocol: TCP + port: {{ .Values.redis.port }} + targetPort: {{ .Values.redis.port }} +{{- end }} diff --git a/applications/rapid-analysis/templates/redis-statefulset.yaml b/applications/rapid-analysis/templates/redis-statefulset.yaml new file mode 100644 index 0000000000..224d83c500 --- /dev/null +++ b/applications/rapid-analysis/templates/redis-statefulset.yaml @@ -0,0 +1,90 @@ +{{- if .Values.redis.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: {{ .Values.namespace }} + labels: + {{- include "rapid-analysis.redis.labels" . | nindent 4 }} +spec: + serviceName: redis-service + selector: + matchLabels: + {{- include "rapid-analysis.redis.selectorLabels" . | nindent 6 }} + replicas: {{ .Values.redis.replicas | default 1 }} + template: + metadata: + labels: + {{- include "rapid-analysis.redis.selectorLabels" . 
| nindent 8 }} + spec: + securityContext: + fsGroup: 999 + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 999 + containers: + - name: redis + image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + command: [ "redis-server", "--appendonly", "yes", "--requirepass", "$(REDIS_PASSWORD)" ] + ports: + - containerPort: {{ .Values.redis.port }} + env: + {{- range $env_var, $env_value := .Values.redis.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := .Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + livenessProbe: + exec: + command: + - sh + - '-c' + - 'redis-cli -h $(hostname) -a $(REDIS_PASSWORD) incr health:counter' + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 1 + {{- with $.Values.redis.resources }} + resources: + {{- toYaml $.Values.redis.resources | nindent 10 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + volumeMounts: + - mountPath: /data + name: data + {{- with $.Values.redis.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + {{- if $.Values.redis.storage.classname }} + storageClassName: {{ $.Values.redis.storage.classname }} + {{- end }} + resources: + requests: + storage: {{ $.Values.redis.storage.request }} +{{- end }} diff --git a/applications/rapid-analysis/templates/vault-secret.yaml b/applications/rapid-analysis/templates/vault-secret.yaml new file mode 100644 index 0000000000..7b3ccf0a19 --- /dev/null +++ b/applications/rapid-analysis/templates/vault-secret.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: pull-secret + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "pullSecretsPath must be set" .Values.pullSecretsPath }} + type: kubernetes.io/dockerconfigjson +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: google-creds + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "credentialSecretsPath must be set" .Values.credentialSecretsPath }} + type: Opaque +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: butler-secret + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . }} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "butlerSecret.key must be set" .Values.butlerSecret.key }} + type: Opaque +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: rubintv-secrets + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/name: {{ include "rapid-analysis.name" . 
}} +spec: + path: {{ required "vaultSecretsPath must be set" .Values.global.vaultSecretsPath }}/{{ required "rubinTvSecretsPath must be set" .Values.rubinTvSecretsPath }} + type: Opaque diff --git a/applications/rapid-analysis/templates/worker-set.yaml b/applications/rapid-analysis/templates/worker-set.yaml new file mode 100644 index 0000000000..ad87fbc2b8 --- /dev/null +++ b/applications/rapid-analysis/templates/worker-set.yaml @@ -0,0 +1,231 @@ +{{ $_ := set $.Values "script" $.Values.workerSet }} +{{ $script := $.Values.workerSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-workerset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := 
$.Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 
8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/values-summit.yaml b/applications/rapid-analysis/values-summit.yaml new file mode 100644 index 0000000000..185b063e84 --- /dev/null +++ b/applications/rapid-analysis/values-summit.yaml @@ -0,0 +1,151 @@ +image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0039 + pullPolicy: Always +location: SUMMIT +env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml +scripts: +- name: summit/auxTel/runBackgroundService.py + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 10G +- name: summit/auxTel/runButlerWatcher.py +- name: summit/auxTel/runCalibrateCcdRunner.py +- name: summit/auxTel/runImExaminer.py +- name: summit/auxTel/runIsrRunner.py +- name: summit/auxTel/runMetadataCreator.py +- name: summit/auxTel/runMetadataServer.py +- name: summit/auxTel/runMonitor.py + resources: + requests: + cpu: 0.5 + memory: 1G + limits: + cpu: 1.0 + memory: 10G +- name: summit/auxTel/runMountTorquePlotter.py +- name: summit/auxTel/runNightReporter.py +- name: summit/auxTel/runSpecExaminer.py + resources: + requests: + cpu: 0.5 + memory: 2G + limits: + cpu: 1.0 + memory: 4G +- name: summit/misc/runAllSky.py + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 2 + memory: 6G +- name: summit/misc/runStarTracker.py +- name: summit/misc/runStarTrackerCatchup.py +- name: summit/misc/runStarTrackerFast.py +- name: summit/misc/runStarTrackerMetadata.py +- name: summit/misc/runStarTrackerNightReport.py +- name: summit/misc/runStarTrackerWide.py +- name: summit/misc/runTmaTelemetry.py +- name: summit/LSSTComCam/runButlerWatcher.py +- name: summit/LSSTComCam/runHeadNode.py +- name: summit/LSSTComCam/runMetadataServer.py +- name: summit/LSSTComCam/runPlotter.py +workerSet: + name: summit/LSSTComCam/runSfmRunner.py + replicas: 36 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G +credentialFile: google_write_creds +pullSecretsPath: pull-secret +rubinTvSecretsPath: rubintv +credentialSecretsPath: rubintv-broadcaster +butlerSecret: + key: butler-secret + containerPath: /home/saluser/.lsst + dbUser: oods +imagePullSecrets: +- name: pull-secret +nfsMountpoint: +- name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS +- name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam +- name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata +- name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata +- name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project +- name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel +- name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam +- name: allsky-data + containerPath: /data/allsky + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/allsky +- name: scratch-shared + containerPath: /scratch + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /scratch/rubintv +resources: + requests: + cpu: 0.5 + memory: 1G + limits: + 
cpu: 1.0 + memory: 2.5G +redis: + enabled: true + port: 6379 + env: + MASTER: true + envSecrets: + - name: REDIS_PASSWORD + secretName: rubintv-secrets + secretKey: redis-password + storage: + classname: rook-ceph-block + request: 10Gi + resources: + requests: + cpu: 100m + memory: 1Gi + limits: + cpu: 1 + memory: 50Gi diff --git a/applications/rapid-analysis/values-tucson-teststand.yaml b/applications/rapid-analysis/values-tucson-teststand.yaml new file mode 100644 index 0000000000..8604e12165 --- /dev/null +++ b/applications/rapid-analysis/values-tucson-teststand.yaml @@ -0,0 +1,121 @@ +image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0039 + pullPolicy: Always +env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + DEPLOY_BRANCH: deploy-tts +siteTag: tts +location: TTS +scripts: +- name: summit/auxTel/runBackgroundService.py +- name: summit/auxTel/runButlerWatcher.py +- name: summit/auxTel/runCalibrateCcdRunner.py +- name: summit/auxTel/runImExaminer.py +- name: summit/auxTel/runIsrRunner.py +- name: summit/auxTel/runMetadataCreator.py +- name: summit/auxTel/runMetadataServer.py +- name: summit/auxTel/runMonitor.py +- name: summit/auxTel/runMountTorquePlotter.py +- name: summit/auxTel/runNightReporter.py +- name: summit/auxTel/runSpecExaminer.py +- name: summit/comCam/runButlerWatcher.py +- name: summit/comCam/runIsrRunner_000.py +- name: summit/comCam/runIsrRunner_001.py +- name: summit/comCam/runIsrRunner_002.py +- name: summit/comCam/runIsrRunner_003.py +- name: summit/comCam/runIsrRunner_004.py +- name: summit/comCam/runIsrRunner_005.py +- name: summit/comCam/runIsrRunner_006.py +- name: summit/comCam/runIsrRunner_007.py +- name: summit/comCam/runIsrRunner_008.py +- name: summit/comCam/runMetadataServer.py +- name: summit/comCam/runPlotter.py + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 6G +- name: summit/misc/runTmaTelemetry.py +# TODO: remove google credentials +credentialFile: google_write_creds +vaultPrefixPath: secret/k8s_operator/tucson-teststand.lsst.codes +pullSecretsPath: pull-secret +rubinTvSecretsPath: rubintv +# TODO: remove google credentials +credentialSecretsPath: rubintv +butlerSecret: + key: butler-secret + containerPath: /home/saluser/.lsst + dbUser: oods +imagePullSecrets: +- name: pull-secret +nfsMountpoint: +- name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS +- name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam +- name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata +- name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata +- name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project +- name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel +- name: comcam-gen3-data-temp + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam +- name: scratch-shared + containerPath: /scratch + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/rubintv +resources: + requests: + cpu: 0.5 + memory: 1G + 
limits: + cpu: 1.0 + memory: 2.5G +redis: + enabled: true + port: 6379 + env: + MASTER: true + envSecrets: + - name: REDIS_PASSWORD + secretName: rubintv-secrets + secretKey: redis-password + storage: + classname: rook-ceph-block + request: 10Gi + resources: + requests: + cpu: 100m + memory: 1Gi + limits: + cpu: 1 + memory: 50Gi diff --git a/applications/rapid-analysis/values.yaml b/applications/rapid-analysis/values.yaml new file mode 100644 index 0000000000..7151ddc993 --- /dev/null +++ b/applications/rapid-analysis/values.yaml @@ -0,0 +1,130 @@ +image: + # -- The Docker registry name for the container image. + repository: ts-dockerhub.lsst.org/rubintv-broadcaster + # -- The tag of the container image to use. + tag: develop + # -- The policy to apply when pulling an image for deployment. + pullPolicy: IfNotPresent +# -- This is the namespace where the applications will be deployed. +namespace: rapid-analysis +# -- A special tag for letting the scripts know where they are running. +siteTag: "" +# -- Provide the location where the system is running. +location: "" +# -- List of script objects to run for the broadcaster. +# This section MUST have the following attribute specified for each entry. +# _name_ (The full path for the script) +# The following attributes are optional +# _resources_ (A resource object specification) +# _nodeSelector_ (A node selector object specification) +# _tolerations_ (A list of tolerations) +# _affinity_ (An affinity object specification) +scripts: {} +# -- This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). +# NOTE: RUN_ARG is taken care of by the chart using _script_. +env: {} +# -- This section holds specifications for secret injection. +# If this section is used, each object listed must have the following attributes defined: +# _name_ (The label for the secret), +# _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), +# _secretKey_ (The key in the vault store containing the necessary secret) +envSecrets: [] +# -- The Vault prefix path +vaultPrefixPath: "" +# The key for the pull secrets including any sub-paths. +pullSecretsPath: "" +# Path for the rubin tv specific secrets vault. +rubinTvSecretsPath: "" +# -- This key allows specification of a script to override the entrypoint. +# -- The list of pull secrets needed for the images. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (The label identifying the pull-secret to use) +imagePullSecrets: [] +# -- This section allows for specification of Butler secret information. +# If this section is used, it must contain the following attributes: +# _key_ (The vault key for the Butler secret), +# _containerPath_ (The directory location for the Butler secret), +# _dbUser_ (The username for the Butler backend database) +butlerSecret: {} +# -- This section holds the information necessary to create a NFS mount for the container. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (A label identifier for the mountpoint), +# _containerPath_ (The path inside the container to mount), +# _readOnly_ (This sets if the NFS mount is read only or read/write), +# _server_ (The hostname of the NFS server), +# _serverPath_ (The path exported by the NFS server) +nfsMountpoint: [] +# -- This section holds information about existing volume claims. 
+# If the section is used, each object listed can have the following attributes defined: +# _name_ (The name of the persistent volume), +# _containerPath_ (The path inside the container to mount), +# _subPath_ (persistent volume subpath, optional) +pvcMountpoint: [] +# -- This section holds the information necessary to claim persistent volumes. +# If the section is used, each object listed can have the following attributes defined: +# _name_ (The name of the persistent volume), +# _containerPath_ (The path inside the container to mount), +# _subPath_ (persistent volume subpath, optional) +pvcMountpointClaim: [] +# -- The key for the credentials including any sub-paths. +credentialSecretsPath: "" +# -- The name of the expected credential file for the broadcasters +credentialFile: "" +# -- Adds an extra string to the release name. +nameOverride: "" +# -- Specify the deployed application name specifically. Overrides all other names. +fullnameOverride: "" +# -- This allows the specification of pod annotations. +podAnnotations: {} +# -- This allows the specification of resources (CPU, memory) required to run the container. +resources: {} +# -- This allows the specification of using specific nodes to run the pod. +nodeSelector: {} +# -- This specifies the tolerations of the pod for any system taints. +tolerations: [] +# -- This specifies the scheduling constraints of the pod. +affinity: {} +# -- This section allows for specification of security context information. +# If the section is used, at least one of the following attributes must be specified. +# _uid_ (User id to run application as), +# _gid_ (Group id of the user that runs the application), +# _fid_ (File system context user id), +securityContext: {} +# -- This configures a StatefulSet used for single frame workers. +workerSet: {} +# -- This configures a StatefulSet used for visit-level gather processing. +gather2aSet: {} +# -- This configures a StatefulSet used for night-summary rollup. +gatherRollupSet: {} +redis: + # -- This specifies whether to use redis or not. + enabled: false + image: + # -- The Docker registry name for the redis container image. + repository: docker.io/redis + # -- The tag of the redis container image to use. + tag: latest + # -- The policy to apply when pulling an image for deployment. + pullPolicy: IfNotPresent + # -- This section holds a set of key, value pairs for environmental variables (ENV_VAR: value). + # NOTE: RUN_ARG is taken care of by the chart using _script_. + env: {} + # -- This section holds specifications for secret injection. + # If this section is used, each object listed must have the following attributes defined: + # _name_ (The label for the secret), + # _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), + # _secretKey_ (The key in the vault store containing the necessary secret) + envSecrets: [] + storage: + # str -- The storage class name for the data store request. + classname: + # -- The size of the storage request. + request: 1Gi + # -- This allows the specification of resources (CPU, memory) required to run the redis container. 
+ resources: {} + # -- Node selection rules for the redis pods + nodeSelector: {} + # -- Toleration specifications for the redis pods + tolerations: [] + # -- Affinity rules for the redis pods + affinity: {} diff --git a/applications/rubintv/values-summit.yaml b/applications/rubintv/values-summit.yaml index 62ef88610c..07a3594fb2 100644 --- a/applications/rubintv/values-summit.yaml +++ b/applications/rubintv/values-summit.yaml @@ -20,11 +20,11 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.2.0 + tag: v2.3.1 pullPolicy: Always workers: - replicas: 0 + replicas: 1 image: repository: ts-dockerhub.lsst.org/rapid-analysis tag: c0037 @@ -32,7 +32,7 @@ rubintv: uid: 73006 gid: 73006 scriptsLocation: /repos/rubintv_analysis_service/scripts - script: rubintv_worker.py -a rubintv-dev -p 8080 -l summit + script: rubintv_worker.py -a rubintv -p 8080 -l summit env: - name: S3_ENDPOINT_URL value: *s3E @@ -43,9 +43,12 @@ rubintv: - name: DEPLOY_BRANCH value: *dbE resources: + requests: + cpu: 0.5 + memory: 1G limits: - cpu: 2.0 - memory: "8Gi" + cpu: 1.0 + memory: 2.5G global: tsVaultSecretsPath: "" diff --git a/applications/rubintv/values-tucson-teststand.yaml b/applications/rubintv/values-tucson-teststand.yaml new file mode 100644 index 0000000000..64526e159d --- /dev/null +++ b/applications/rubintv/values-tucson-teststand.yaml @@ -0,0 +1,42 @@ +rubintv: + siteTag: "tucson" + separateSecrets: true + + imagePullSecrets: + - name: pull-secret + + frontend: + debug: true + env: + - name: S3_ENDPOINT_URL + value: &s3E "https://s3.rubintv.tu.lsst.org" + - name: RAPID_ANALYSIS_LOCATION + value: "TTS" + image: + tag: deploy + pullPolicy: Always + + workers: + replicas: 1 + image: + repository: ts-dockerhub.lsst.org/rapid-analysis + tag: c0037 + pullPolicy: Always + uid: 73006 + gid: 73006 + scriptsLocation: /repos/rubintv_analysis_service/scripts + script: rubintv_worker.py -a rubintv-dev -p 8080 -c /repos/rubintv_analysis_service/scripts/config-temporal.yaml + env: + - name: S3_ENDPOINT_URL + value: *s3E + - name: DAF_BUTLER_REPOSITORY_INDEX + value: "s3://rubin-summit-users/data-repos.yaml" + - name: DAF_BUTLER_REPOSITORY + value: "/sdf/group/rubin/repo/ir2/butler.yaml" + resources: + limits: + cpu: 2.0 + memory: "8Gi" + +global: + tsVaultSecretsPath: "" diff --git a/applications/rubintv/values-usdfprod.yaml b/applications/rubintv/values-usdfprod.yaml index 2499da043c..9818e96584 100644 --- a/applications/rubintv/values-usdfprod.yaml +++ b/applications/rubintv/values-usdfprod.yaml @@ -16,7 +16,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.2.0 + tag: v2.3.1 pullPolicy: Always workers: diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index 93bb8ee863..723f35d9f7 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -46,12 +46,19 @@ dependencies: - name: telegraf-kafka-consumer condition: telegraf-kafka-consumer.enabled version: 1.0.0 + - name: telegraf-kafka-consumer + alias: telegraf-kafka-consumer-oss + condition: telegraf-kafka-consumer-oss.enabled + version: 1.0.0 - name: rest-proxy condition: rest-proxy.enabled version: 1.0.0 - name: square-events condition: squareEvents.enabled version: 1.0.0 + - name: app-metrics + condition: app-metrics.enabled + version: 1.0.0 annotations: phalanx.lsst.io/docs: | diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index dc8b8488d3..cde6e253fc 100644 --- a/applications/sasquatch/README.md +++ 
b/applications/sasquatch/README.md @@ -18,6 +18,8 @@ Rubin Observatory's telemetry service | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| app-metrics.apps | list | `[]` | The apps to create configuration for. | +| app-metrics.enabled | bool | `false` | Enable the app-metrics subchart with topic, user, and telegraf configurations | | chronograf.enabled | bool | `true` | Whether Chronograf is enabled | | chronograf.env | object | See `values.yaml` | Additional environment variables for Chronograf | | chronograf.envFromSecret | string | `"sasquatch"` | Name of secret to use. The keys `generic_client_id`, `generic_client_secret`, and `token_secret` should be set. | @@ -81,6 +83,26 @@ Rubin Observatory's telemetry service | strimzi-registry-operator.clusterNamespace | string | `"sasquatch"` | Namespace where the Strimzi Kafka cluster is deployed | | strimzi-registry-operator.operatorNamespace | string | `"sasquatch"` | Namespace where the strimzi-registry-operator is deployed | | telegraf-kafka-consumer | object | `{}` | Overrides for telegraf-kafka-consumer configuration | +| app-metrics.affinity | object | `{}` | Affinity for pod assignment | +| app-metrics.apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| app-metrics.args | list | `[]` | Arguments passed to the Telegraf agent containers | +| app-metrics.cluster.name | string | `"sasquatch"` | | +| app-metrics.debug | bool | false | Run Telegraf in debug mode. | +| app-metrics.env | list | See `values.yaml` | Telegraf agent enviroment variables | +| app-metrics.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| app-metrics.globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| app-metrics.globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | +| app-metrics.image.pullPolicy | string | `"Always"` | Image pull policy | +| app-metrics.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| app-metrics.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| app-metrics.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| app-metrics.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| app-metrics.nodeSelector | object | `{}` | Node labels for pod assignment | +| app-metrics.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| app-metrics.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| app-metrics.replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. 
| +| app-metrics.resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| app-metrics.tolerations | list | `[]` | Tolerations for pod assignment | | influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | Enable authentication of the data nodes using this secret, by creating a username and password for an admin account. The secret must contain keys `username` and `password`. | | influxdb-enterprise.bootstrap.ddldml.configMap | string | Do not run DDL or DML | A config map containing DDL and DML that define databases, retention policies, and inject some data. The keys `ddl` and `dml` must exist, even if one of them is empty. DDL is executed before DML to ensure databases and retention policies exist. | | influxdb-enterprise.bootstrap.ddldml.resources | object | `{}` | Kubernetes resources and limits for the bootstrap job | @@ -163,7 +185,9 @@ Rubin Observatory's telemetry service | influxdb-enterprise.meta.service.loadBalancerIP | string | Do not allocate a load balancer IP | Load balancer IP for the meta service | | influxdb-enterprise.meta.service.nodePort | int | Do not allocate a node port | Node port for the meta service | | influxdb-enterprise.meta.service.type | string | `"ClusterIP"` | Service type for the meta service | -| influxdb-enterprise.meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). | +| influxdb-enterprise.meta.sharedSecret.secret | object | `{"key":"secret","name":"influxdb-enterprise-shared-secret"}` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). 
| +| influxdb-enterprise.meta.sharedSecret.secret.key | string | `"secret"` | Key within that secret that contains the shared secret | +| influxdb-enterprise.meta.sharedSecret.secret.name | string | `"influxdb-enterprise-shared-secret"` | Name of the secret containing the shared secret | | influxdb-enterprise.meta.tolerations | list | `[]` | Tolerations for meta pods | | influxdb-enterprise.nameOverride | string | `""` | Override the base name for resources | | influxdb-enterprise.serviceAccount.annotations | object | `{}` | Annotations to add to the service account | @@ -311,7 +335,7 @@ Rubin Observatory's telemetry service | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository | -| rest-proxy.image.tag | string | `"7.7.0"` | Kafka REST proxy image tag | +| rest-proxy.image.tag | string | `"7.7.1"` | Kafka REST proxy image tag | | rest-proxy.ingress.annotations | object | See `values.yaml` | Additional annotations to add to the ingress | | rest-proxy.ingress.enabled | bool | `false` | Whether to enable the ingress | | rest-proxy.ingress.hostname | string | None, must be set if ingress is enabled | Ingress hostname | @@ -363,7 +387,7 @@ Rubin Observatory's telemetry service | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| strimzi-kafka.kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| strimzi-kafka.kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | strimzi-kafka.kafkaController.enabled | bool | `false` | Enable Kafka Controller | | strimzi-kafka.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | strimzi-kafka.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | @@ -400,23 +424,24 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | | telegraf-kafka-consumer.env | list | See `values.yaml` | Telegraf agent enviroment variables | | telegraf-kafka-consumer.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. 
| -| telegraf-kafka-consumer.image.pullPolicy | string | `"Always"` | Image pull policy | -| telegraf-kafka-consumer.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | -| telegraf-kafka-consumer.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| telegraf-kafka-consumer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| telegraf-kafka-consumer.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer.image.tag | string | `"avro-mutex"` | Telegraf image tag | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | | telegraf-kafka-consumer.kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| telegraf-kafka-consumer.kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | | telegraf-kafka-consumer.kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | | telegraf-kafka-consumer.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | -| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | +| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | "10s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | telegraf-kafka-consumer.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | -| telegraf-kafka-consumer.kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | +| telegraf-kafka-consumer.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | | telegraf-kafka-consumer.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | -| telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. 
| +| telegraf-kafka-consumer.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | | telegraf-kafka-consumer.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | telegraf-kafka-consumer.kafkaConsumers.test.precision | string | "1us" | Data precision. | | telegraf-kafka-consumer.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | @@ -431,3 +456,40 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | | telegraf-kafka-consumer.resources | object | See `values.yaml` | Kubernetes resources requests and limits | | telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment | +| telegraf-kafka-consumer-oss.affinity | object | `{}` | Affinity for pod assignment | +| telegraf-kafka-consumer-oss.args | list | `[]` | Arguments passed to the Telegraf agent containers | +| telegraf-kafka-consumer-oss.enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | +| telegraf-kafka-consumer-oss.env | list | See `values.yaml` | Telegraf agent enviroment variables | +| telegraf-kafka-consumer-oss.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| telegraf-kafka-consumer-oss.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer-oss.image.tag | string | `"avro-mutex"` | Telegraf image tag | +| telegraf-kafka-consumer-oss.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| telegraf-kafka-consumer-oss.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | +| telegraf-kafka-consumer-oss.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_interval | string | "10s" | Data flushing interval for all outputs. Don’t set this below interval. 
Maximum flush_interval is flush_interval + flush_jitter | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size, setting it too low may never flush the broker's messages. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.precision | string | "1us" | Data precision. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. Increase this value to increase the consumer throughput. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.tags | list | `[]` | List of Avro fields to be recorded as InfluxDB tags. The Avro fields specified as tags will be converted to strings before ingestion into InfluxDB. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.timestamp_field | string | `"private_efdStamp"` | Avro field to be used as the InfluxDB timestamp (optional). If unspecified or set to the empty string, Telegraf will use the time it received the measurement. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.timestamp_format | string | `"unix"` | Timestamp format. Possible values are `unix` (the default if unset) a timestamp in seconds since the Unix epoch, `unix_ms` (milliseconds), `unix_us` (microsseconds), or `unix_ns` (nanoseconds). | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.union_field_separator | string | `""` | Union field separator: if a single Avro field is flattened into more than one InfluxDB field (e.g. an array `a`, with four members, would yield `a0`, `a1`, `a2`, `a3`; if the field separator were `_`, these would be `a_0`...`a_3`. | +| telegraf-kafka-consumer-oss.kafkaConsumers.test.union_mode | string | `"nullable"` | Union mode: this can be one of `flatten`, `nullable`, or `any`. See `values.yaml` for extensive discussion. 
| +| telegraf-kafka-consumer-oss.nodeSelector | object | `{}` | Node labels for pod assignment | +| telegraf-kafka-consumer-oss.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| telegraf-kafka-consumer-oss.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| telegraf-kafka-consumer-oss.resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| telegraf-kafka-consumer-oss.tolerations | list | `[]` | Tolerations for pod assignment | diff --git a/applications/sasquatch/charts/app-metrics/Chart.yaml b/applications/sasquatch/charts/app-metrics/Chart.yaml new file mode 100644 index 0000000000..1152b5b2ca --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: app-metrics +version: 1.0.0 +appVersion: "1.0.0" +description: Kafka topics, users, and a telegraf connector for metrics events. +type: application diff --git a/applications/sasquatch/charts/app-metrics/README.md b/applications/sasquatch/charts/app-metrics/README.md new file mode 100644 index 0000000000..1cb6c56b6d --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/README.md @@ -0,0 +1,28 @@ +# app-metrics + +Kafka topics, users, and a telegraf connector for metrics events. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment | +| apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| args | list | `[]` | Arguments passed to the Telegraf agent containers | +| cluster.name | string | `"sasquatch"` | | +| debug | bool | false | Run Telegraf in debug mode. | +| env | list | See `values.yaml` | Telegraf agent enviroment variables | +| envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | +| image.pullPolicy | string | `"Always"` | Image pull policy | +| image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | +| influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | +| nodeSelector | object | `{}` | Node labels for pod assignment | +| podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | +| podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | +| replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. 
| +| resources | object | See `values.yaml` | Kubernetes resources requests and limits | +| tolerations | list | `[]` | Tolerations for pod assignment | diff --git a/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl b/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl new file mode 100644 index 0000000000..f88a9ae075 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/_helpers.tpl @@ -0,0 +1,10 @@ +{{/* +Convert a list to a TOML array of quoted string values +*/}} +{{- define "helpers.toTomlArray" -}} +{{- $items := list -}} +{{- range . -}} +{{- $items = (quote .) | append $items -}} +{{- end -}} +[ {{ join ", " $items }} ] +{{- end -}} diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml new file mode 100644 index 0000000000..70db2590de --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml @@ -0,0 +1,15 @@ +{{- range .Values.apps }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: "lsst.square.app-metrics.events.{{ . }}" + labels: + strimzi.io/cluster: {{ $.Values.cluster.name }} +spec: + partitions: 10 + replicas: 3 + config: + # http://kafka.apache.org/documentation/#topicconfigs + retention.ms: 86400000 # 1 day +{{- end }} diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml new file mode 100644 index 0000000000..9ddab60b5e --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml @@ -0,0 +1,31 @@ +{{- range .Values.apps }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: app-metrics-{{ . }} + labels: + strimzi.io/cluster: {{ $.Values.cluster.name }} +spec: + authentication: + type: tls + authorization: + type: simple + acls: + - resource: + type: group + name: app-metrics-events + patternType: prefix + operations: + - "Read" + host: "*" + - resource: + type: topic + name: "lsst.square.app-metrics.events.{{ . 
}}" + patternType: literal + operations: + - "Describe" + - "Read" + - "Write" + host: "*" +{{- end }} diff --git a/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml new file mode 100644 index 0000000000..e8a60a4ae3 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sasquatch-telegraf-app-metrics + labels: + app.kubernetes.io/name: sasquatch-telegraf-app-metrics + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + app.kubernetes.io/part-of: sasquatch +data: + telegraf.conf: |+ + [agent] + metric_batch_size = 5000 + metric_buffer_limit = 100000 + collection_jitter = "0s" + flush_interval = "10s" + flush_jitter = "0s" + debug = {{ default false .Values.debug }} + omit_hostname = true + + [[outputs.influxdb]] + urls = [ + {{ .Values.influxdb.url | quote }} + ] + database = "telegraf-kafka-app-metrics-consumer" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[outputs.influxdb]] + namepass = ["telegraf_*"] + urls = [ + {{ .Values.influxdb.url | quote }} + ] + database = "telegraf" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + {{- range $index, $app := .Values.apps }} + {{- $globalInfluxTags := $.Values.globalInfluxTags | default list }} + {{- $appInfluxTags := (index $.Values.globalAppConfig $app "influxTags") | default list }} + {{- $influxTags := concat $globalInfluxTags $appInfluxTags }} + [[inputs.kafka_consumer]] + brokers = [ + "sasquatch-kafka-brokers.sasquatch:9092" + ] + consumer_group = "telegraf-kafka-consumer-app-metrics" + sasl_mechanism = "SCRAM-SHA-512" + sasl_password = "$TELEGRAF_PASSWORD" + sasl_username = "telegraf" + data_format = "avro" + avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" + avro_timestamp = "timestamp_ns" + avro_timestamp_format = "unix_ns" + avro_union_mode = "nullable" + avro_tags = {{ include "helpers.toTomlArray" $influxTags }} + topics = [ + "lsst.square.app-metrics.events.{{ $app }}", + ] + max_processing_time = "5s" + consumer_fetch_default = "5MB" + max_undelivered_messages = 10000 + compression_codec = 3 + {{- end }} + + [[inputs.internal]] + name_prefix = "telegraf_" + collect_memstats = true + tags = { instance = "app-metrics" } diff --git a/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml b/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml new file mode 100644 index 0000000000..9a0c3dd017 --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/templates/telegraf-deployment.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sasquatch-telegraf-app-metrics + labels: + app.kubernetes.io/name: sasquatch-telegraf-app-metrics + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + app.kubernetes.io/part-of: sasquatch +spec: + replicas: {{ default 1 .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + template: + metadata: + labels: + app.kubernetes.io/instance: sasquatch-telegraf-app-metrics + annotations: + checksum/config: {{ include (print $.Template.BasePath "/telegraf-configmap.yaml") $ | sha256sum }} + {{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + 
containers: + - name: telegraf + securityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- end }} + {{- if .Values.args }} + args: + {{- toYaml .Values.args | nindent 8 }} + {{- end }} + {{- if .Values.env }} + env: + {{- toYaml .Values.env | nindent 8 }} + {{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/telegraf + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: sasquatch-telegraf-app-metrics diff --git a/applications/sasquatch/charts/app-metrics/values.yaml b/applications/sasquatch/charts/app-metrics/values.yaml new file mode 100644 index 0000000000..d5bc17418f --- /dev/null +++ b/applications/sasquatch/charts/app-metrics/values.yaml @@ -0,0 +1,102 @@ +## Default values.yaml for the Metrics Events subchart. + +# -- app-metrics configuration in any environment in which the subchart is +# enabled. This should stay globally specified here, and it shouldn't be +# overridden. +# See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) +# for the structure of this value. +globalAppConfig: {} + +# -- A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. +# The names should be the same as the app names in Phalanx. +apps: [] + +# -- Keys in every event sent by any app that should be recorded in InfluxDB +# as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from +# `globalAppConfig` +globalInfluxTags: ["service"] + +cluster: + # The name of the Strimzi cluster. Synchronize this with the cluster name in + # the parent Sasquatch chart. + name: sasquatch + +# These values refer to the telegraf deployment and config + +image: + # -- Telegraf image repository + repo: "docker.io/library/telegraf" + + # -- Telegraf image tag + tag: "1.30.2-alpine" + + # -- Image pull policy + pullPolicy: "Always" + +# -- Annotations for telegraf-kafka-consumers pods +podAnnotations: {} + +# -- Labels for telegraf-kafka-consumer pods +podLabels: {} + +# -- Secret names to use for Docker pulls +imagePullSecrets: [] + +# -- Arguments passed to the Telegraf agent containers +args: [] + +# -- Telegraf agent environment variables +# @default -- See `values.yaml` +env: + - name: TELEGRAF_PASSWORD + valueFrom: + secretKeyRef: + name: sasquatch + # Telegraf KafkaUser password. + key: telegraf-password + - name: INFLUXDB_USER + valueFrom: + secretKeyRef: + name: sasquatch + # InfluxDB v1 user + key: influxdb-user + - name: INFLUXDB_PASSWORD + valueFrom: + secretKeyRef: + name: sasquatch + # InfluxDB v1 password + key: influxdb-password + +# -- Name of the secret with values to be added to the environment. +envFromSecret: "" + +# -- Run Telegraf in debug mode. 
+# @default -- false +debug: false + +influxdb: + # -- URL of the InfluxDB v1 instance to write to + url: "http://sasquatch-influxdb.sasquatch:8086" + +# -- Number of Telegraf replicas. Multiple replicas increase availability. +replicaCount: 3 + + +# -- Kubernetes resources requests and limits +# @default -- See `values.yaml` +resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "0.5" + memory: "1Gi" + +# -- Node labels for pod assignment +nodeSelector: {} + +# -- Affinity for pod assignment +affinity: {} + +# -- Tolerations for pod assignment +tolerations: [] diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index aba97b90d0..12233edf75 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -92,7 +92,9 @@ Run InfluxDB Enterprise on Kubernetes | meta.service.loadBalancerIP | string | Do not allocate a load balancer IP | Load balancer IP for the meta service | | meta.service.nodePort | int | Do not allocate a node port | Node port for the meta service | | meta.service.type | string | `"ClusterIP"` | Service type for the meta service | -| meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). | +| meta.sharedSecret.secret | object | `{"key":"secret","name":"influxdb-enterprise-shared-secret"}` | Shared secret used by the internal API for JWT authentication between InfluxDB nodes. Must have a key named `secret` that should be a long, random string See [documentation for shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). 
| +| meta.sharedSecret.secret.key | string | `"secret"` | Key within that secret that contains the shared secret | +| meta.sharedSecret.secret.name | string | `"influxdb-enterprise-shared-secret"` | Name of the secret containing the shared secret | | meta.tolerations | list | `[]` | Tolerations for meta pods | | nameOverride | string | `""` | Override the base name for resources | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml index fa28e08cf4..1cc01f575a 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -90,7 +90,7 @@ spec: path: /ping port: http readinessProbe: - initialDelaySeconds: 30 + initialDelaySeconds: 60 httpGet: path: /ping port: http diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml index beff940f34..cf543c32a4 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml @@ -59,8 +59,8 @@ spec: - name: INFLUXDB_META_INTERNAL_SHARED_SECRET valueFrom: secretKeyRef: - name: {{ .Values.meta.sharedSecret.secretName }} - key: secret + name: {{ .Values.meta.sharedSecret.secret.name }} + key: {{ .Values.meta.sharedSecret.secret.key }} {{- if .Values.meta.env }} {{ toYaml .Values.meta.env | indent 12 }} {{- end}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 412b131f72..0709b449c6 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -137,7 +137,12 @@ meta: # InfluxDB nodes. Must have a key named `secret` that should be a long, # random string See [documentation for # shared-internal-secret](https://docs.influxdata.com/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret). - secretName: influxdb-enterprise-shared-secret + secret: + # -- Name of the secret containing the shared secret + name: influxdb-enterprise-shared-secret + + # -- Key within that secret that contains the shared secret + key: secret service: # -- Service type for the meta service diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index 2daa2e6d24..eea798d3ae 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. 
| heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository | -| image.tag | string | `"7.7.0"` | Kafka REST proxy image tag | +| image.tag | string | `"7.7.1"` | Kafka REST proxy image tag | | ingress.annotations | object | See `values.yaml` | Additional annotations to add to the ingress | | ingress.enabled | bool | `false` | Whether to enable the ingress | | ingress.hostname | string | None, must be set if ingress is enabled | Ingress hostname | diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index e396a6e9bf..ef0cd8cbac 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -11,7 +11,7 @@ image: pullPolicy: IfNotPresent # -- Kafka REST proxy image tag - tag: 7.7.0 + tag: 7.7.1 service: # -- Kafka REST proxy service port diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml index 1517ea6c55..25eba2af35 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-topics.yaml @@ -2,7 +2,20 @@ apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaTopic metadata: - name: "lsst.square-events.squarebot.slack.interaction" + name: "lsst.square-events.squarebot.slack.interaction.block-actions" + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + partitions: 4 + replicas: 3 + config: + # http://kafka.apache.org/documentation/#topicconfigs + retention.ms: 1800000 # 30 minutes +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: "lsst.square-events.squarebot.slack.interaction.view-submission" labels: strimzi.io/cluster: {{ .Values.cluster.name }} spec: diff --git a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml index 3b0f8e252a..1285a4ec6f 100644 --- a/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml +++ b/applications/sasquatch/charts/square-events/templates/squarebot-user.yaml @@ -64,7 +64,16 @@ spec: - "Describe" - resource: type: topic - name: "lsst.square-events.squarebot.slack.interaction" + name: "lsst.square-events.squarebot.slack.interaction.block-actions" + patternType: literal + type: allow + host: "*" + operations: + - "Write" + - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.view-submission" patternType: literal type: allow host: "*" diff --git a/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml new file mode 100644 index 0000000000..580bfa028f --- /dev/null +++ b/applications/sasquatch/charts/square-events/templates/templatebot-user.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: templatebot + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + template: + secret: + metadata: + annotations: + replicator.v1.mittwald.de/replication-allowed: "true" + replicator.v1.mittwald.de/replication-allowed-namespaces: "templatebot" + authentication: + type: tls + authorization: + type: 
simple + acls: + - resource: + type: group + name: "templatebot" + patternType: prefix + operations: + - "Read" + host: "*" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.app.mention" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.message.im" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.block-actions" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" + - resource: + type: topic + name: "lsst.square-events.squarebot.slack.interaction.view-submission" + patternType: literal + type: allow + host: "*" + operations: + - "Read" + - "Describe" diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index ce4efaea25..556761d75d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -41,7 +41,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment | -| kafka.version | string | `"3.7.1"` | Version of Kafka to deploy | +| kafka.version | string | `"3.8.0"` | Version of Kafka to deploy | | kafkaController.enabled | bool | `false` | Enable Kafka Controller | | kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller | | kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 2ae8501f6f..6d587fd746 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -11,7 +11,7 @@ cluster: kafka: # -- Version of Kafka to deploy - version: "3.7.1" + version: "3.8.0" # -- Number of Kafka broker replicas to run replicas: 3 @@ -285,6 +285,7 @@ users: # -- Enable user consdb enabled: false + mirrormaker2: # -- Enable replication in the target (passive) cluster enabled: false diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index d9c8dbcb70..0be7c27bdb 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -11,23 +11,24 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | enabled | bool | `false` | Wether the Telegraf Kafka Consumer is enabled | | env | list | See `values.yaml` | Telegraf agent enviroment variables | | envFromSecret | string | `""` | Name of the secret with values to be added to the environment. 
| -| image.pullPolicy | string | `"Always"` | Image pull policy | -| image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | -| image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | +| image.tag | string | `"avro-mutex"` | Telegraf image tag | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | | kafkaConsumers.test.collection_jitter | string | "0s" | Data collection jitter. This is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. | +| kafkaConsumers.test.compression_codec | int | 3 | Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD | | kafkaConsumers.test.consumer_fetch_default | string | "20MB" | Maximum amount of data the server should return for a fetch request. | | kafkaConsumers.test.debug | bool | false | Run Telegraf in debug mode. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | -| kafkaConsumers.test.flush_interval | string | "1s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | +| kafkaConsumers.test.flush_interval | string | "10s" | Data flushing interval for all outputs. Don’t set this below interval. Maximum flush_interval is flush_interval + flush_jitter | | kafkaConsumers.test.flush_jitter | string | "0s" | Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances. | -| kafkaConsumers.test.interval | string | "1s" | Data collection interval for the Kafka consumer. | | kafkaConsumers.test.max_processing_time | string | "5s" | Maximum processing time for a single message. | +| kafkaConsumers.test.max_undelivered_messages | int | 10000 | Maximum number of undelivered messages. Should be a multiple of metric_batch_size; setting it too low may prevent the broker's messages from ever being flushed. | | kafkaConsumers.test.metric_batch_size | int | 1000 | Sends metrics to the output in batches of at most metric_batch_size metrics. | -| kafkaConsumers.test.metric_buffer_limit | int | 10000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and could not be less than 2 times metric_batch_size. | +| kafkaConsumers.test.metric_buffer_limit | int | 100000 | Caches metric_buffer_limit metrics for each output, and flushes this buffer on a successful write. This should be a multiple of metric_batch_size and must not be less than 2 times metric_batch_size. | | kafkaConsumers.test.offset | string | `"oldest"` | Kafka consumer offset. Possible values are `oldest` and `newest`. | | kafkaConsumers.test.precision | string | "1us" | Data precision. | | kafkaConsumers.test.replicaCount | int | `1` | Number of Telegraf Kafka consumer replicas. 
Increase this value to increase the consumer throughput. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl new file mode 100644 index 0000000000..11dae28e5a --- /dev/null +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{- define "configmap" -}} +{{- if .value.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sasquatch-telegraf-{{ .key }} + labels: + app.kubernetes.io/name: sasquatch-telegraf + app.kubernetes.io/instance: sasquatch-telegraf-{{ .key }} + app.kubernetes.io/part-of: sasquatch +data: + telegraf.conf: |+ + [agent] + metric_batch_size = {{ default 1000 .value.metric_batch_size }} + metric_buffer_limit = {{ default 100000 .value.metric_buffer_limit }} + collection_jitter = {{ default "0s" .value.collection_jitter | quote }} + flush_interval = {{ default "10s" .value.flush_interval | quote }} + flush_jitter = {{ default "0s" .value.flush_jitter | quote }} + debug = {{ default false .value.debug }} + omit_hostname = true + + [[outputs.influxdb]] + urls = [ + {{ .influxdbUrl | quote }} + ] + database = {{ .value.database | quote }} + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[outputs.influxdb]] + namepass = ["telegraf_*"] + urls = [ + {{ .influxdbUrl | quote }} + ] + database = "telegraf" + username = "${INFLUXDB_USER}" + password = "${INFLUXDB_PASSWORD}" + + [[inputs.kafka_consumer]] + brokers = [ + "sasquatch-kafka-brokers.sasquatch:9092" + ] + consumer_group = "telegraf-kafka-consumer-{{ .key }}" + sasl_mechanism = "SCRAM-SHA-512" + sasl_password = "$TELEGRAF_PASSWORD" + sasl_username = "telegraf" + data_format = "avro" + avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" + avro_timestamp = {{ default "private_efdStamp" .value.timestamp_field | quote }} + avro_timestamp_format = {{ default "unix" .value.timestamp_format | quote }} + avro_union_mode = {{ default "nullable" .value.union_mode | quote }} + avro_field_separator = {{ default "" .value.union_field_separator | quote }} + {{- if .value.fields }} + avro_fields = {{ .value.fields }} + {{- end }} + {{- if .value.tags }} + avro_tags = {{ .value.tags }} + {{- end }} + topic_regexps = {{ .value.topicRegexps }} + offset = {{ default "oldest" .value.offset | quote }} + precision = {{ default "1us" .value.precision | quote }} + max_processing_time = {{ default "5s" .value.max_processing_time | quote }} + consumer_fetch_default = {{ default "20MB" .value.consumer_fetch_default | quote }} + max_undelivered_messages = {{ default 10000 .value.max_undelivered_messages }} + compression_codec = {{ default 3 .value.compression_codec }} + + [[inputs.internal]] + name_prefix = "telegraf_" + collect_memstats = true + tags = { instance = "{{ .key }}" } +{{- end }} +{{- end }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 5e55c1a59e..6f70b74961 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -1,60 +1,5 @@ {{- range $key, $value := .Values.kafkaConsumers }} -{{- if $value.enabled }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: sasquatch-telegraf-{{ $key }} - labels: - app: sasquatch-telegraf-kakfa-consumer -data: - telegraf.conf: |+ - 
[agent] - interval = {{ default "1s" $value.interval | quote }} - round_interval = true - metric_batch_size = {{ default 1000 $value.metric_batch_size }} - metric_buffer_limit = {{ default 10000 $value.metric_buffer_limit }} - collection_jitter = {{ default "0s" $value.collection_jitter | quote }} - flush_interval = {{ default "1s" $value.flush_interval | quote }} - flush_jitter = {{ default "0s" $value.flush_jitter | quote }} - debug = {{ default false $value.debug }} - omit_hostname = true - [[outputs.influxdb]] - urls = [ - {{ $.Values.influxdb.url | quote }} - ] - database = {{ $value.database | quote }} - username = "${INFLUXDB_USER}" - password = "${INFLUXDB_PASSWORD}" +{{ include "configmap" (dict "key" $key "value" $value "influxdbUrl" $.Values.influxdb.url ) }} - [[inputs.kafka_consumer]] - brokers = [ - "sasquatch-kafka-brokers.sasquatch:9092" - ] - consumer_group = "telegraf-kafka-consumer-{{ $key }}" - sasl_mechanism = "SCRAM-SHA-512" - sasl_password = "$TELEGRAF_PASSWORD" - sasl_username = "telegraf" - data_format = "avro" - avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" - avro_timestamp = {{ default "private_efdStamp" $value.timestamp_field | quote }} - avro_timestamp_format = {{ default "unix" $value.timestamp_format | quote }} - avro_union_mode = {{ default "nullable" $value.union_mode | quote }} - avro_field_separator = {{ default "" $value.union_field_separator | quote }} - {{ with $value.fields }} - avro_fields = {{ $value.fields }} - {{ end }} - {{ with $value.tags }} - avro_tags = {{ $value.tags }} - {{ end }} - topic_regexps = {{ $value.topicRegexps }} - offset = {{ default "oldest" $value.offset | quote }} - precision = {{ default "1us" $value.precision | quote }} - max_processing_time = {{ default "5s" $value.max_processing_time | quote }} - consumer_fetch_default = {{ default "20MB" $value.consumer_fetch_default | quote }} - - [[inputs.internal]] - collect_memstats = false -{{- end }} {{- end }} diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml index addd04a6e6..f8117c8900 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/deployment.yaml @@ -6,20 +6,23 @@ kind: Deployment metadata: name: sasquatch-telegraf-{{ $key }} labels: - app: sasquatch-telegraf-kafka-consumer + app.kubernetes.io/name: sasquatch-telegraf + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} + app.kubernetes.io/part-of: sasquatch spec: replicas: {{ default 1 $value.replicaCount }} selector: matchLabels: - app: sasquatch-telegraf-kafka-consumer + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} template: metadata: labels: - app: sasquatch-telegraf-kafka-consumer - {{- if $.Values.podAnnotations }} + app.kubernetes.io/instance: sasquatch-telegraf-{{ $key }} annotations: + checksum/config: {{ include "configmap" (dict "key" $key "value" $value "influxdbUrl" $.Values.influxdb.url ) | sha256sum }} + {{- if $.Values.podAnnotations }} {{- toYaml $.Values.podAnnotations | nindent 8 }} - {{- end }} + {{- end }} spec: securityContext: runAsNonRoot: true diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 9b8e89ebb3..dd0fc7cb4f 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ 
b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -5,13 +5,13 @@ enabled: false image: # -- Telegraf image repository - repo: "docker.io/library/telegraf" + repo: "docker.io/lsstsqre/telegraf" # -- Telegraf image tag - tag: "1.30.2-alpine" + tag: "avro-mutex" # -- Image pull policy - pullPolicy: "Always" + pullPolicy: "IfNotPresent" # -- Annotations for telegraf-kafka-consumers pods podAnnotations: {} @@ -60,10 +60,6 @@ kafkaConsumers: # increase the consumer throughput. replicaCount: 1 - # -- Data collection interval for the Kafka consumer. - # @default -- "1s" - interval: "1s" - # -- Sends metrics to the output in batches of at most metric_batch_size # metrics. # @default -- 1000 @@ -72,8 +68,8 @@ kafkaConsumers: # -- Caches metric_buffer_limit metrics for each output, and flushes this # buffer on a successful write. This should be a multiple of metric_batch_size # and must not be less than 2 times metric_batch_size. - # @default -- 10000 - metric_buffer_limit: 10000 + # @default -- 100000 + metric_buffer_limit: 100000 # -- Data collection jitter. This is used to jitter the collection by a # random amount. Each plugin will sleep for a random time within jitter @@ -84,8 +80,8 @@ kafkaConsumers: # -- Data flushing interval for all outputs. # Don’t set this below interval. # Maximum flush_interval is flush_interval + flush_jitter - # @default -- "1s" - flush_interval: "1s" + # @default -- "10s" + flush_interval: "10s" # -- Jitter the flush interval by a random amount. This is primarily to # avoid large write spikes for users running a large number of telegraf @@ -175,6 +171,16 @@ kafkaConsumers: # @default -- "20MB" consumer_fetch_default: "20MB" + # -- Maximum number of undelivered messages. + # Should be a multiple of metric_batch_size; setting it too low may prevent + # the broker's messages from ever being flushed. + # @default -- 10000 + max_undelivered_messages: 10000 + + # -- Compression codec. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTD + # @default -- 3 + compression_codec: 3 + influxdb: # -- URL of the InfluxDB v1 instance to write to url: "http://sasquatch-influxdb.sasquatch:8086" diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 2a19674f17..13cf51ef04 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -77,3 +77,11 @@ camera-password: description: >- camera KafkaUser password. if: strimzi-kafka.users.camera.enabled +influxdb-enterprise-license: + description: >- + InfluxDB Enterprise license. + if: influxdb-enterprise.enabled +influxdb-enterprise-shared-secret: + description: >- + InfluxDB Enterprise shared secret. 
+ if: influxdb-enterprise.enabled diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 257afa096b..4440c387ea 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -22,6 +22,7 @@ strimzi-kafka: config: auto.create.topics.enable: false log.cleaner.min.compaction.lag.ms: 259200000 + log.message.timestamp.type: LogAppendTime log.retention.hours: 72 log.retention.ms: 259200000 storage: @@ -130,199 +131,101 @@ influxdb: enabled: true hostname: base-lsp.lsst.codes -kafka-connect-manager: - influxdbSink: - # Based on the kafka producers configuration for the BTS - # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-base-teststand.yaml - connectors: - auxtel: - enabled: true - topicsRegex: "lsst.sal.ATAOS|lsst.sal.ATDome|lsst.sal.ATDomeTrajectory|lsst.sal.ATHexapod|lsst.sal.ATPneumatics|lsst.sal.ATPtg|lsst.sal.ATMCS" - maintel: - enabled: true - topicsRegex: "lsst.sal.MTAOS|lsst.sal.MTDome|lsst.sal.MTDomeTrajectory|lsst.sal.MTPtg" - mtmount: - enabled: true - topicsRegex: "lsst.sal.MTMount" - tasksMax: "8" - eas: - enabled: true - topicsRegex: "lsst.sal.DIMM|lsst.sal.DSM|lsst.sal.EPM|lsst.sal.ESS|lsst.sal.HVAC|lsst.sal.WeatherForecast" - latiss: - enabled: true - topicsRegex: "lsst.sal.ATCamera|lsst.sal.ATHeaderService|lsst.sal.ATOODS|lsst.sal.ATSpectrograph" - m1m3: - enabled: true - topicsRegex: "lsst.sal.MTM1M3" - tasksMax: "8" - m2: - enabled: true - topicsRegex: "lsst.sal.MTHexapod|lsst.sal.MTM2|lsst.sal.MTRotator" - obssys: - enabled: true - topicsRegex: "lsst.sal.Scheduler|lsst.sal.Script|lsst.sal.ScriptQueue|lsst.sal.Watcher" - ocps: - enabled: true - topicsRegex: "lsst.sal.OCPS" - test: - enabled: true - topicsRegex: "lsst.sal.Test" - mtaircompressor: - enabled: true - topicsRegex: "lsst.sal.MTAirCompressor" - lasertracker: - enabled: true - topicsRegex: "lsst.sal.LaserTracker" - genericcamera: - enabled: true - topicsRegex: "lsst.sal.GCHeaderService|lsst.sal.GenericCamera" - lsstcam: - enabled: true - topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" - telegraf-kafka-consumer: - enabled: false + enabled: true kafkaConsumers: auxtel: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true maintel: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true mtmount: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTMount" ] + debug: true eas: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true latiss: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: 
"nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true m1m3: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTM1M3" ] + debug: true m2: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true obssys: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true ocps: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.OCPS" ] + debug: true test: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.Test" ] + debug: true mtaircompressor: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] + debug: true lasertracker: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.LaserTracker" ] + debug: true genericcamera: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + debug: true lsstcam: enabled: true - replicaCount: 1 - interval: "1s" - flush_interval: "1s" - union_mode: "nullable" - timestamp_format: "unix" - timestamp_field: "private_efdStamp" + database: "efd" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + debug: true + obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" @@ -343,6 +246,7 @@ rest-proxy: topicPrefixes: - test - lsst.dm + - lsst.obsenv chronograf: persistence: diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 8c62d1c356..7a6158cfef 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -9,15 +9,28 @@ strimzi-kafka: loadBalancerIP: "139.229.180.2" host: sasquatch-summit-kafka-bootstrap.lsst.codes brokers: - - broker: 0 - loadBalancerIP: "139.229.180.3" - host: sasquatch-summit-kafka-0.lsst.codes - - broker: 1 - loadBalancerIP: "139.229.180.4" - host: sasquatch-summit-kafka-1.lsst.codes - - broker: 2 - 
loadBalancerIP: "139.229.180.5" - host: sasquatch-summit-kafka-2.lsst.codes + - broker: 6 + loadBalancerIP: "139.229.180.8" + host: sasquatch-summit-kafka-6.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn + - broker: 7 + loadBalancerIP: "139.229.180.9" + host: sasquatch-summit-kafka-7.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn + - broker: 8 + loadBalancerIP: "139.229.180.10" + host: sasquatch-summit-kafka-8.lsst.codes + annotations: + metallb.universe.tf/address-pool: lhn + resources: + requests: + memory: 32Gi + cpu: 4 + limits: + memory: 32Gi + cpu: 4 kraft: enabled: true kafkaController: @@ -51,7 +64,26 @@ strimzi-kafka: nginx.ingress.kubernetes.io/rewrite-target: /$2 hostname: summit-lsp.lsst.codes path: /schema-registry(/|$)(.*) - + cruiseControl: + enabled: true + brokerStorage: + enabled: true + storageClassName: localdrive + size: 15Ti + migration: + enabled: false + rebalance: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - yagan17 + - yagan18 + - yagan19 influxdb: persistence: storageClass: rook-ceph-block @@ -67,6 +99,59 @@ influxdb: memory: 128Gi cpu: 16 +influxdb-enterprise: + enabled: true + license: + secret: + name: sasquatch + key: influxdb-enterprise-license + meta: + ingress: + enabled: true + hostname: summit-lsp.lsst.codes + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 16Gi + sharedSecret: + secret: + name: sasquatch + key: influxdb-enterprise-shared-secret + resources: + requests: + memory: 2Gi + cpu: 2 + limits: + memory: 4Gi + cpu: 4 + data: + replicas: 1 + ingress: + enabled: true + hostname: summit-lsp.lsst.codes + persistence: + enabled: true + accessMode: ReadWriteOnce + storageClass: localdrive + size: 15Ti + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - yagan20 + # -- InfluxDB Enterprise data pod resources, 16 cores single node license + resources: + requests: + memory: 256Gi + cpu: 16 + limits: + memory: 256Gi + cpu: 16 + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit @@ -150,8 +235,70 @@ kafka-connect-manager: repairerConnector: false topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" +telegraf-kafka-consumer-oss: + enabled: true + kafkaConsumers: + oss-backpack: + enabled: true + replicaCount: 1 + database: "lsst.backpack" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.backpack" ] + oss-atcamera: + enabled: true + replicaCount: 1 + database: "lsst.ATCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] + topicRegexps: | + [ "lsst.ATCamera" ] + oss-cccamera: + enabled: true + replicaCount: 1 + database: "lsst.CCCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] + topicRegexps: | + [ "lsst.CCCamera" ] + oss-mtcamera: + enabled: true + replicaCount: 1 + database: "lsst.MTCamera" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + tags: | + [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", 
"Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] + topicRegexps: | + [ "lsst.MTCamera" ] + oss-obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true + oss-cp: + enabled: true + database: "lsst.cp" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.cp" ] + tags: | + [ "dataset_tag", "band", "instrument", "skymap", "detector", "physical_filter", "tract", "exposure", "patch", "visit", "run", "pipeline" ] + debug: true + telegraf-kafka-consumer: enabled: true + influxdb: + url: "http://sasquatch-influxdb-enterprise-data.sasquatch:8086" kafkaConsumers: backpack: enabled: true @@ -161,9 +308,137 @@ telegraf-kafka-consumer: timestamp_field: "timestamp" topicRegexps: | [ "lsst.backpack" ] + debug: true + # CSC connectors + maintel: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true + mtmount: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTMount" ] + debug: true + comcam: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] + debug: true + eas: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true + m1m3: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTM1M3" ] + debug: true + m2: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true + obssys: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true + ocps: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.OCPS" ] + debug: true + pmd: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.PMD" ] + debug: true + calsys: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] + debug: true + mtaircompressor: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTAirCompressor" ] + debug: true + genericcamera: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + debug: true + gis: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.GIS" ] + debug: true + lsstcam: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] + debug: true + auxtel: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", 
"lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true + latiss: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true + test: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.Test" ] + debug: true + lasertracker: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.LaserTracker" ] + debug: true + # CCS connectors (experimental) data is being written on separate databases for now atcamera: enabled: true - replicaCount: 1 database: "lsst.ATCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" @@ -171,9 +446,9 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] topicRegexps: | [ "lsst.ATCamera" ] + debug: true cccamera: enabled: true - replicaCount: 1 database: "lsst.CCCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" @@ -181,9 +456,9 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] topicRegexps: | [ "lsst.CCCamera" ] + debug: true mtcamera: enabled: true - replicaCount: 1 database: "lsst.MTCamera" timestamp_format: "unix_ms" timestamp_field: "timestamp" @@ -191,6 +466,8 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] topicRegexps: | [ "lsst.MTCamera" ] + debug: true + kafdrop: ingress: @@ -208,6 +485,8 @@ rest-proxy: topicPrefixes: - lsst.dm - lsst.backpack + - lsst.obsenv + - lsst.cp - lsst.ATCamera - lsst.CCCamera - lsst.MTCamera diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 002c0d1bca..03b2703aa2 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -76,122 +76,115 @@ influxdb: hostname: tucson-teststand.lsst.codes telegraf-kafka-consumer: - enabled: false + enabled: true kafkaConsumers: auxtel: enabled: true + database: "efd" topicRegexps: | - [ ".*ATAOS", ".*ATDome", ".*ATDomeTrajectory", ".*ATHexapod", ".*ATPneumatics", ".*ATPtg", ".*ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true maintel: enabled: true + database: "efd" topicRegexps: | - [ ".*MTAOS", ".*MTDome", ".*MTDomeTrajectory", ".*MTPtg" ] + [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] + debug: true mtmount: enabled: true + database: "efd" topicRegexps: | - [ ".*MTMount" ] - comcam: - enabled: true - topicRegexps: | - [ ".*CCCamera", ".*CCHeaderService", ".*CCOODS" ] + [ "lsst.sal.MTMount" ] + debug: true eas: enabled: true + database: "efd" + metric_batch_size: 100 + flush_interval: 20s topicRegexps: | - [ ".*DIMM", ".*DSM", ".*EPM", ".*ESS", ".*WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true latiss: enabled: true + database: "efd" topicRegexps: | - [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] + [ 
"lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true m1m3: enabled: true - flush_interval: "1s" - metric_batch_size: 5000 - interval: "0.1s" + database: "efd" topicRegexps: | - [ ".*MTM1M3" ] + [ "lsst.sal.MTM1M3" ] + debug: true m2: enabled: true + database: "efd" topicRegexps: | - [ ".*MTHexapod", ".*MTM2", ".*MTRotator" ] + [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] + debug: true obssys: enabled: true + database: "efd" topicRegexps: | - [ ".*Scheduler", ".*Script", ".*ScriptQueue", ".*Watcher" ] + [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] + debug: true ocps: enabled: true + database: "efd" topicRegexps: | - [ ".*OCPS" ] + [ "lsst.sal.OCPS" ] + debug: true calsys: enabled: true + database: "efd" + topicRegexps: | + [ "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LEDProjector", "lsst.sal.LinearStage", "lsst.sal.MTReflector", "lsst.sal.TunableLaser" ] + debug: true + comcam: + enabled: true + database: "efd" + topicRegexps: | + [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] + debug: true + test: + enabled: true + database: "efd" topicRegexps: | - [ ".*ATMonochromator", ".*ATWhiteLight", ".*CBP", ".*Electrometer", ".*FiberSpectrograph", ".*LEDProjector", ".*LinearStage", ".*MTReflector", ".*TunableLaser" ] + [ "lsst.sal.Test" ] + debug: true mtaircompressor: enabled: true + database: "efd" topicRegexps: | - [ ".*MTAirCompressor" ] + [ "lsst.sal.MTAirCompressor" ] + debug: true lasertracker: enabled: true + database: "efd" topicRegexps: | - [ ".*LaserTracker" ] - test: + [ "lsst.sal.LaserTracker" ] + debug: true + genericcamera: enabled: true + database: "efd" topicRegexps: | - [ "lsst.sal.Test" ] - genericcamera: + [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] + debug: true + obsenv: enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" topicRegexps: | - [ ".*GCHeaderService", ".*GenericCamera" ] - -kafka-connect-manager: - influxdbSink: - # Based on the kafka producers configuration for the TTS - # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-tucson-teststand.yaml - connectors: - auxtel: - enabled: true - topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" - maintel: - enabled: true - topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" - mtmount: - enabled: true - topicsRegex: ".*MTMount" - comcam: - enabled: true - topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" - eas: - enabled: true - topicsRegex: ".*DIMM|.*DSM|.*EPM|.*ESS|.*WeatherForecast" - latiss: - enabled: true - topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" - m1m3: - enabled: true - topicsRegex: ".*MTM1M3" - m2: - enabled: true - topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" - obssys: - enabled: true - topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" - ocps: - enabled: true - topicsRegex: ".*OCPS" - test: - enabled: true - topicsRegex: "lsst.sal.Test" - calsys: - enabled: true - topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LEDProjector|.*LinearStage|.*MTReflector|.*TunableLaser" - mtaircompressor: - enabled: true - topicsRegex: ".*MTAirCompressor" - lasertracker: - enabled: true - topicsRegex: ".*LaserTracker" - genericcamera: - enabled: true - topicsRegex: ".*GCHeaderService|.*GenericCamera" + [ "lsst.obsenv" ] + debug: true + 
resources: + limits: + cpu: "2" + memory: "2Gi" + requests: + cpu: "1" + memory: "1Gi" kafdrop: cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" @@ -211,6 +204,7 @@ rest-proxy: - test.next-visit topicPrefixes: - test + - lsst.obsenv - lsst.dm chronograf: diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 833f333ba7..a08a521d88 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -157,6 +157,7 @@ rest-proxy: kafka: topics: - test.next-visit + - test.next-visit-job topicPrefixes: - test - lsst.dm diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 4a1503f939..9e02f4ea5e 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -22,7 +22,7 @@ strimzi-kafka: enabled: true source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 - topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*" + topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*, lsst.backpack.*, lsst.ATCamera.*, lsst.CCCamera.*, lsst.MTCamera.*, lsst.obsenv.*, lsst.cp.*" resources: requests: cpu: 2 @@ -139,6 +139,7 @@ telegraf-kafka-consumer: timestamp_field: "timestamp" topicRegexps: | [ "lsst.backpack" ] + debug: true # CSC connectors maintel: enabled: true @@ -146,124 +147,126 @@ telegraf-kafka-consumer: timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAOS", "lsst.sal.MTDome", "lsst.sal.MTDomeTrajectory", "lsst.sal.MTPtg" ] - offset: "newest" + debug: true mtmount: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTMount" ] - offset: "newest" + debug: true comcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.CCCamera", "lsst.sal.CCHeaderService", "lsst.sal.CCOODS" ] - offset: "newest" + debug: true eas: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] - offset: "newest" + [ "lsst.sal.DIMM", "lsst.sal.ESS", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + debug: true m1m3: enabled: true database: "efd" - replicaCount: 8 timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTM1M3" ] - offset: "newest" + debug: true m2: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTHexapod", "lsst.sal.MTM2", "lsst.sal.MTRotator" ] - offset: "newest" + debug: true obssys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Scheduler", "lsst.sal.Script", "lsst.sal.ScriptQueue", "lsst.sal.Watcher" ] - offset: "newest" + debug: true ocps: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.OCPS" ] - offset: "newest" + debug: true pmd: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.PMD" ] - offset: "newest" + debug: true calsys: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] - offset: 
"newest" + debug: true mtaircompressor: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTAirCompressor" ] - offset: "newest" + debug: true genericcamera: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GCHeaderService", "lsst.sal.GenericCamera" ] - offset: "newest" + debug: true gis: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GIS" ] - offset: "newest" + debug: true lsstcam: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.MTCamera", "lsst.sal.MTHeaderService", "lsst.sal.MTOODS" ] - offset: "newest" + debug: true auxtel: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + debug: true latiss: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.ATCamera", "lsst.sal.ATHeaderService", "lsst.sal.ATOODS", "lsst.sal.ATSpectrograph" ] + debug: true test: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.Test" ] + debug: true lasertracker: enabled: true database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.LaserTracker" ] + debug: true # CCS connectors (experimental) data is being written on separate databases for now atcamera: enabled: true @@ -274,6 +277,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Location", "Raft", "Reb", "Sensor", "Source" ] topicRegexps: | [ "lsst.ATCamera" ] + debug: true cccamera: enabled: true database: "lsst.CCCamera" @@ -283,6 +287,7 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Cold", "Cryo", "Hardware", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Source" ] topicRegexps: | [ "lsst.CCCamera" ] + debug: true mtcamera: enabled: true database: "lsst.MTCamera" @@ -292,6 +297,25 @@ telegraf-kafka-consumer: [ "Agent", "Aspic", "Axis", "Canbus", "Cip", "Clamp", "Cold", "Controller", "Cryo", "Gateway", "Hardware", "Hip", "Hook", "Latch", "Location", "Ps", "RTD", "Raft", "Reb", "Segment", "Sensor", "Socket", "Source", "Truck" ] topicRegexps: | [ "lsst.MTCamera" ] + debug: true + obsenv: + enabled: true + database: "lsst.obsenv" + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.obsenv" ] + debug: true + cp: + enabled: true + database: "lsst.cp" + timestamp_format: "unix" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.cp" ] + tags: | + [ "dataset_tag", "band", "instrument", "skymap", "detector", "physical_filter", "tract", "exposure", "patch", "visit", "run", "pipeline" ] + debug: true kafdrop: ingress: diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index d7cb91e266..cc9fff35e6 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -283,3 +283,10 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +app-metrics: + # -- Enable the app-metrics subchart with topic, user, and telegraf configurations + enabled: false + + # -- The apps to create configuration for. 
+ apps: [] diff --git a/applications/squarebot/Chart.yaml b/applications/squarebot/Chart.yaml index e46b7e53fd..46f43eabff 100644 --- a/applications/squarebot/Chart.yaml +++ b/applications/squarebot/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: squarebot version: 1.0.0 -appVersion: "0.9.0" +appVersion: "0.10.0" description: Squarebot feeds events from services like Slack and GitHub into the SQuaRE Events Kafka message bus running on Roundtable. Backend apps like Templatebot and Unfurlbot can subscribe to these events and take domain-specific action. type: application home: https://squarebot.lsst.io/ diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md index 2695e862fb..8828804def 100644 --- a/applications/squarebot/README.md +++ b/applications/squarebot/README.md @@ -19,11 +19,12 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event | autoscaling.targetCPUUtilizationPercentage | int | `80` | | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | -| config.topics.slackInteraction | string | `"lsst.square-events.squarebot.slack.interaction"` | Kafka topic for Slack interaction events | +| config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` interaction events | | config.topics.slackMessageChannels | string | `"lsst.square-events.squarebot.slack.message.channels"` | Kafka topic name for the Slack `message.channels` events (public channels) | | config.topics.slackMessageGroups | string | `"lsst.square-events.squarebot.slack.message.groups"` | Kafka topic name for the Slack `message.groups` events (private channels) | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | | config.topics.slackMessageMpim | string | `"lsst.square-events.squarebot.slack.message.mpim"` | Kafka topic name for the Slack `message.mpim` events (multi-person direct messages) | +| config.topics.slackViewSubmission | string | `"lsst.square-events.squarebot.slack.interaction.view-submission"` | Kafka topic for Slack `view_submission` interaction events | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff --git a/applications/squarebot/templates/configmap.yaml b/applications/squarebot/templates/configmap.yaml index b6f81143a8..916c526898 100644 --- a/applications/squarebot/templates/configmap.yaml +++ b/applications/squarebot/templates/configmap.yaml @@ -14,4 +14,5 @@ data: SQUAREBOT_TOPIC_MESSAGE_GROUPS: {{ .Values.config.topics.slackMessageGroups | quote }} SQUAREBOT_TOPIC_MESSAGE_IM: {{ .Values.config.topics.slackMessageIm | quote }} SQUAREBOT_TOPIC_MESSAGE_MPIM: {{ .Values.config.topics.slackMessageMpim | quote }} - SQUAREBOT_TOPIC_INTERACTION: {{ .Values.config.topics.slackInteraction | quote }} + SQUAREBOT_TOPIC_BLOCK_ACTIONS: {{ .Values.config.topics.slackBlockActions | quote }} + SQUAREBOT_TOPIC_VIEW_SUBMISSION: {{ .Values.config.topics.slackViewSubmission | quote }} diff --git a/applications/squarebot/values.yaml b/applications/squarebot/values.yaml 
index bd00c36a37..a59e16748f 100644 --- a/applications/squarebot/values.yaml +++ b/applications/squarebot/values.yaml @@ -107,5 +107,8 @@ config: # -- Kafka topic name for the Slack `message.mpim` events (multi-person direct messages) slackMessageMpim: "lsst.square-events.squarebot.slack.message.mpim" - # -- Kafka topic for Slack interaction events - slackInteraction: "lsst.square-events.squarebot.slack.interaction" + # -- Kafka topic for Slack `block_actions` interaction events + slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" + + # -- Kafka topic for Slack `view_submission` interaction events + slackViewSubmission: "lsst.square-events.squarebot.slack.interaction.view-submission" diff --git a/applications/strimzi-access-operator/values-idfdev.yaml b/applications/strimzi-access-operator/values-idfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index 342761cc8b..fc8ddc5460 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -7,5 +7,5 @@ home: https://strimzi.io appVersion: "0.39.0" dependencies: - name: strimzi-kafka-operator - version: "0.42.0" + version: "0.43.0" repository: https://strimzi.io/charts/ diff --git a/applications/tap/secrets-usdfdev.yaml b/applications/tap/secrets-usdfdev.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfdev.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/secrets-usdfint.yaml b/applications/tap/secrets-usdfint.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfint.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/secrets-usdfprod.yaml b/applications/tap/secrets-usdfprod.yaml new file mode 100644 index 0000000000..f6a85b9f26 --- /dev/null +++ b/applications/tap/secrets-usdfprod.yaml @@ -0,0 +1,4 @@ +qserv-password: + description: >- + Password for the QServ database server + if: cadc-tap.config.qserv.passwordEnabled diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml index e82393ad97..69109d427f 100644 --- a/applications/tap/values-usdfdev.yaml +++ b/applications/tap/values-usdfdev.yaml @@ -5,8 +5,9 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + host: "sdfqserv001.sdf.slac.stanford.edu:4090" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/tap/values-usdfint.yaml b/applications/tap/values-usdfint.yaml index b8f8ab9404..ca53594d8f 100644 --- a/applications/tap/values-usdfint.yaml +++ b/applications/tap/values-usdfint.yaml @@ -5,8 +5,9 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + host: "sdfqserv001.sdf.slac.stanford.edu:4090" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml index b8f8ab9404..9021a9e3fa 100644 --- a/applications/tap/values-usdfprod.yaml +++ b/applications/tap/values-usdfprod.yaml @@ 
-5,8 +5,9 @@ cadc-tap: config: qserv: - host: "172.24.49.51:4040" - jdbcParams: "?enabledTLSProtocols=TLSv1.2" + host: "sdfqserv001.sdf.slac.stanford.edu:4040" + jdbcParams: "?enabledTLSProtocols=TLSv1.3" + passwordEnabled: true gcsBucket: "rubin:rubin-qserv" gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index b6e5adade2..8cb53aec89 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.33 + version: 1.1.34 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 407d6c1e7b..33c097cea8 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.53 + version: 1.8.54 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/templatebot/.helmignore b/applications/templatebot/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/templatebot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/templatebot/Chart.yaml b/applications/templatebot/Chart.yaml new file mode 100644 index 0000000000..c8a3e6c9b1 --- /dev/null +++ b/applications/templatebot/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: "tickets-DM-43699" +description: Create new projects +name: templatebot +sources: + - https://github.com/lsst-sqre/templatebot +type: application +version: 1.0.0 diff --git a/applications/templatebot/README.md b/applications/templatebot/README.md new file mode 100644 index 0000000000..fa76b28227 --- /dev/null +++ b/applications/templatebot/README.md @@ -0,0 +1,32 @@ +# templatebot + +Create new projects + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the templatebot deployment pod | +| config.logLevel | string | `"INFO"` | Logging level | +| config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | +| config.pathPrefix | string | `"/templatebot"` | URL path prefix | +| config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | +| config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` interaction events | +| config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | +| config.topics.slackViewSubmission | string | `"lsst.square-events.squarebot.slack.interaction.view-submission"` | Kafka topic for Slack `view_submission` interaction events | +| global.baseUrl | string | Set by Argo CD | Base URL 
for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the templatebot image | +| image.repository | string | `"ghcr.io/lsst-sqre/templatebot"` | Image to use in the templatebot deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the templatebot deployment pod | +| podAnnotations | object | `{}` | Annotations for the templatebot deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | See `values.yaml` | Resource limits and requests for the templatebot deployment pod | +| tolerations | list | `[]` | Tolerations for the templatebot deployment pod | diff --git a/applications/templatebot/secrets.yaml b/applications/templatebot/secrets.yaml new file mode 100644 index 0000000000..7e672c9ecf --- /dev/null +++ b/applications/templatebot/secrets.yaml @@ -0,0 +1,26 @@ +TEMPLATEBOT_GITHUB_APP_ID: + description: >- + The ID of the GitHub App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_ID +TEMPLATEBOT_GITHUB_APP_PRIVATE_KEY: + description: >- + The private key for the GitHub App shared by all Squarebot services. + onepassword: + encoded: true + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_PRIVATE_KEY +TEMPLATEBOT_SLACK_APP_ID: + description: >- + The ID of the Slack App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_SLACK_APP_ID +TEMPLATEBOT_SLACK_TOKEN: + description: >- + The Slack bot user oauth token for the Slack App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_SLACK_TOKEN diff --git a/applications/templatebot/templates/_helpers.tpl b/applications/templatebot/templates/_helpers.tpl new file mode 100644 index 0000000000..22ab8421e4 --- /dev/null +++ b/applications/templatebot/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "templatebot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "templatebot.labels" -}} +helm.sh/chart: {{ include "templatebot.chart" . }} +{{ include "templatebot.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "templatebot.selectorLabels" -}} +app.kubernetes.io/name: "templatebot" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/templatebot/templates/configmap.yaml b/applications/templatebot/templates/configmap.yaml new file mode 100644 index 0000000000..343c47e17b --- /dev/null +++ b/applications/templatebot/templates/configmap.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . 
| nindent 4 }} +data: + TEMPLATEBOT_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + TEMPLATEBOT_ENVIRONMENT_URL: {{ .Values.global.baseUrl | quote }} + TEMPLATEBOT_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} + TEMPLATEBOT_PROFILE: {{ .Values.config.logProfile | quote }} + TEMPLATEBOT_APP_MENTION_TOPIC: {{ .Values.config.topics.slackAppMention | quote }} + TEMPLATEBOT_MESSAGE_IM_TOPIC: {{ .Values.config.topics.slackMessageIm | quote }} + TEMPLATEBOT_BLOCK_ACTIONS_TOPIC: {{ .Values.config.topics.slackBlockActions | quote }} + TEMPLATEBOT_VIEW_SUBMISSION_TOPIC: {{ .Values.config.topics.slackViewSubmission | quote }} diff --git a/applications/templatebot/templates/deployment.yaml b/applications/templatebot/templates/deployment.yaml new file mode 100644 index 0000000000..79888b1aff --- /dev/null +++ b/applications/templatebot/templates/deployment.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "templatebot.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "templatebot.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + envFrom: + - configMapRef: + name: "templatebot" + env: + # Writeable directory for concatenating certs. See "tmp" volume. + - name: "KAFKA_CERT_TEMP_DIR" + value: "/tmp/kafka_certs" + - name: "KAFKA_SECURITY_PROTOCOL" + value: "SSL" + # From KafkaAccess + - name: "KAFKA_BOOTSTRAP_SERVERS" + valueFrom: + secretKeyRef: + name: templatebot-kafka + key: "bootstrapServers" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/kafkacluster/ca.crt" + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/kafkauser/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/kafkauser/user.key" + # From Vault secrets + - name: "TEMPLATEBOT_SLACK_APP_ID" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_SLACK_APP_ID" + - name: "TEMPLATEBOT_SLACK_TOKEN" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_SLACK_TOKEN" + volumeMounts: + - name: "kafka" + mountPath: "/etc/kafkacluster/ca.crt" + subPath: "ssl.truststore.crt" # CA cert from the Kafka cluster + - name: "kafka" + mountPath: "/etc/kafkauser/user.crt" + subPath: "ssl.keystore.crt" # User cert from the Kafka cluster signed by the clients' CA + - name: "kafka" + mountPath: "/etc/kafkauser/user.key" + subPath: "ssl.keystore.key" # private key for the consuming client + - name: "tmp" + mountPath: "/tmp/kafka_certs" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + volumes: + - name: "kafka" + secret: + secretName: templatebot-kafka + - name: "templatebot" + secret: + secretName: "templatebot" + - name: "tmp" + emptyDir: {} diff --git a/applications/templatebot/templates/kafkaaccess.yaml b/applications/templatebot/templates/kafkaaccess.yaml new file mode 100644 index 0000000000..8ca9095ac8 --- /dev/null +++ b/applications/templatebot/templates/kafkaaccess.yaml @@ -0,0 +1,14 @@ +apiVersion: access.strimzi.io/v1alpha1 +kind: KafkaAccess +metadata: + name: templatebot-kafka +spec: + kafka: + name: sasquatch + namespace: sasquatch + listener: tls + user: + kind: KafkaUser + apiGroup: kafka.strimzi.io + name: templatebot + namespace: sasquatch diff --git a/applications/templatebot/templates/networkpolicy.yaml b/applications/templatebot/templates/networkpolicy.yaml new file mode 100644 index 0000000000..ca1c1e87a1 --- /dev/null +++ b/applications/templatebot/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "templatebot" +spec: + podSelector: + matchLabels: + {{- include "templatebot.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/templatebot/templates/service.yaml b/applications/templatebot/templates/service.yaml new file mode 100644 index 0000000000..2ad67bccf8 --- /dev/null +++ b/applications/templatebot/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "templatebot" + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "templatebot.selectorLabels" . | nindent 4 }} diff --git a/applications/templatebot/templates/vaultsecret.yaml b/applications/templatebot/templates/vaultsecret.yaml new file mode 100644 index 0000000000..defc7709fe --- /dev/null +++ b/applications/templatebot/templates/vaultsecret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: templatebot + labels: + {{- include "templatebot.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/templatebot" + type: Opaque diff --git a/applications/templatebot/values-roundtable-dev.yaml b/applications/templatebot/values-roundtable-dev.yaml new file mode 100644 index 0000000000..91a3f6a1c6 --- /dev/null +++ b/applications/templatebot/values-roundtable-dev.yaml @@ -0,0 +1,5 @@ +image: + pullPolicy: Always + +config: + logLevel: "DEBUG" diff --git a/applications/templatebot/values-roundtable-prod.yaml b/applications/templatebot/values-roundtable-prod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/templatebot/values.yaml b/applications/templatebot/values.yaml new file mode 100644 index 0000000000..227aa85890 --- /dev/null +++ b/applications/templatebot/values.yaml @@ -0,0 +1,76 @@ +# Default values for templatebot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
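The templatebot deployment above mounts the Strimzi cluster CA and the KafkaUser client certificate and key from the `templatebot-kafka` secret and points the `KAFKA_CLUSTER_CA_PATH`, `KAFKA_CLIENT_CERT_PATH`, and `KAFKA_CLIENT_KEY_PATH` environment variables at those files, with `KAFKA_CERT_TEMP_DIR` as writable scratch space for clients that need to concatenate certificates. The following is only an illustrative sketch of how a Kafka client could consume that configuration; it assumes the aiokafka library and a `templatebot` consumer group name, neither of which is specified by this chart, and it is not a description of templatebot's actual implementation.

```python
# Illustrative sketch only: build a mutual-TLS context from the cert paths
# injected by the deployment above and subscribe to the Slack event topics
# that the ConfigMap exposes as TEMPLATEBOT_*_TOPIC environment variables.
import asyncio
import os
import ssl

from aiokafka import AIOKafkaConsumer  # assumed library, not from this chart


def build_ssl_context() -> ssl.SSLContext:
    # Trust the Strimzi cluster CA and present the KafkaUser client cert.
    context = ssl.create_default_context(
        cafile=os.environ["KAFKA_CLUSTER_CA_PATH"]
    )
    context.load_cert_chain(
        certfile=os.environ["KAFKA_CLIENT_CERT_PATH"],
        keyfile=os.environ["KAFKA_CLIENT_KEY_PATH"],
    )
    return context


async def main() -> None:
    topics = [
        os.environ["TEMPLATEBOT_APP_MENTION_TOPIC"],
        os.environ["TEMPLATEBOT_MESSAGE_IM_TOPIC"],
        os.environ["TEMPLATEBOT_BLOCK_ACTIONS_TOPIC"],
        os.environ["TEMPLATEBOT_VIEW_SUBMISSION_TOPIC"],
    ]
    consumer = AIOKafkaConsumer(
        *topics,
        bootstrap_servers=os.environ["KAFKA_BOOTSTRAP_SERVERS"],
        security_protocol="SSL",
        ssl_context=build_ssl_context(),
        group_id="templatebot",  # hypothetical consumer group name
    )
    await consumer.start()
    try:
        async for message in consumer:
            print(message.topic, message.value)
    finally:
        await consumer.stop()


if __name__ == "__main__":
    asyncio.run(main())
```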
+ +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the templatebot deployment + repository: "ghcr.io/lsst-sqre/templatebot" + + # -- Pull policy for the templatebot image + pullPolicy: "IfNotPresent" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: null + +config: + # -- Logging level + logLevel: "INFO" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "production" + + # -- URL path prefix + pathPrefix: "/templatebot" + + topics: + # -- Kafka topic name for the Slack `app_mention` events + slackAppMention: "lsst.square-events.squarebot.slack.app.mention" + + # -- Kafka topic name for the Slack `message.im` events (direct message channels) + slackMessageIm: "lsst.square-events.squarebot.slack.message.im" + + # -- Kafka topic for Slack `block_actions` interaction events + slackBlockActions: "lsst.square-events.squarebot.slack.interaction.block-actions" + + # -- Kafka topic for Slack `view_submission` interaction events + slackViewSubmission: "lsst.square-events.squarebot.slack.interaction.view-submission" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +# -- Affinity rules for the templatebot deployment pod +affinity: {} + +# -- Node selection rules for the templatebot deployment pod +nodeSelector: {} + +# -- Annotations for the templatebot deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the templatebot deployment pod +# @default -- See `values.yaml` +resources: {} + +# -- Tolerations for the templatebot deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index fc984d18bb..5a48fbb4f9 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.11.0" +appVersion: "0.13.0" dependencies: - name: redis diff --git a/applications/times-square/README.md b/applications/times-square/README.md index c690816d26..7385571ed7 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -23,8 +23,10 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.instanceConnectionName | string | `""` | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | +| config.defaultExecutionTimeout | string | `"300"` | Default execution timeout for notebooks in seconds | | config.enableGitHubApp | string | `"False"` | Toggle to enable the GitHub App functionality | | config.githubAppId | string | `""` | GitHub application ID | +| config.githubCheckRunTimeout | string | `"900"` | Timeout for GitHub check runs in seconds | | config.githubOrgs | string | `"lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"` | GitHub organizations that can sync repos to Times Square (comma-separated). | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.name | string | `"times-square"` | Name of the service. | diff --git a/applications/times-square/templates/configmap.yaml b/applications/times-square/templates/configmap.yaml index 739914e85b..d11584634f 100644 --- a/applications/times-square/templates/configmap.yaml +++ b/applications/times-square/templates/configmap.yaml @@ -16,3 +16,5 @@ data: TS_ENABLE_GITHUB_APP: {{ .Values.config.enableGitHubApp | quote }} TS_GITHUB_APP_ID: {{ .Values.config.githubAppId | quote }} TS_GITHUB_ORGS: {{ .Values.config.githubOrgs | quote }} + TS_CHECK_RUN_TIMEOUT: {{ .Values.config.githubCheckRunTimeout | quote }} + TS_DEFAULT_EXECUTION_TIMEOUT: {{ .Values.config.defaultExecutionTimeout | quote }} diff --git a/applications/times-square/values-idfdev.yaml b/applications/times-square/values-idfdev.yaml index de7c4d6e60..9adb89ef9b 100644 --- a/applications/times-square/values-idfdev.yaml +++ b/applications/times-square/values-idfdev.yaml @@ -7,6 +7,8 @@ config: databaseUrl: "postgresql://times-square@localhost/times-square" githubAppId: "196798" enableGitHubApp: "True" + githubCheckRunTimeout: "600" # 10 minutes + defaultExecutionTimeout: "60" # 1 minute cloudsql: enabled: true instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index ac482b06df..e6cdc61f51 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -136,6 +136,12 @@ config: # -- GitHub organizations that can sync repos to Times Square (comma-separated). 
githubOrgs: "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst" + # -- Timeout for GitHub check runs in seconds + githubCheckRunTimeout: "900" # 15 minutes + + # -- Default execution timeout for notebooks in seconds + defaultExecutionTimeout: "300" # 5 minutes + worker: # -- Enable liveness checks for the arq queue enableLivenessCheck: true @@ -194,10 +200,10 @@ redis: resources: limits: cpu: "1" - memory: "2Gi" + memory: "4Gi" requests: cpu: "6m" - memory: "50Mi" + memory: "1Gi" # -- Pod annotations for the Redis pod podAnnotations: {} diff --git a/applications/uws/values-summit.yaml b/applications/uws/values-summit.yaml index 41410dc51c..47341768f5 100644 --- a/applications/uws/values-summit.yaml +++ b/applications/uws/values-summit.yaml @@ -33,10 +33,10 @@ uws-api-server: subPath: "" readOnly: false - name: repo-comcam - server: comcam-archiver.cp.lsst.org + server: nfs3.cp.lsst.org claimName: repo-comcam-pvc mountPath: "/repo/LSSTComCam" - exportPath: "/repo/LSSTComCam" + exportPath: "/comcam/repo/LSSTComCam" subPath: "" readOnly: false - name: data-auxtel @@ -47,9 +47,9 @@ uws-api-server: subPath: "" readOnly: true - name: data-comcam - server: comcam-archiver.cp.lsst.org + server: nfs3.cp.lsst.org claimName: data-comcam-pvc mountPath: "/data/lsstdata/base/comcam" - exportPath: "/lsstdata/base/comcam" + exportPath: "/comcam/lsstdata/base/comcam" subPath: "" readOnly: true diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 873d9050c4..4aed5b2fe5 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -4,7 +4,7 @@ version: 1.0.0 description: "Image cutout service complying with IVOA SODA" sources: - "https://github.com/lsst-sqre/vo-cutouts" -appVersion: 3.1.0 +appVersion: 3.2.0 dependencies: - name: redis diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index 3c9f245a24..ad78a23274 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -13,6 +13,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | +| cloudsql.image.schemaUpdateTagSuffix | string | `"-alpine"` | Tag suffix to use for the proxy for schema updates | | cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL is used | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | @@ -26,6 +27,7 @@ Image cutout service complying with IVOA SODA | config.storageBucketUrl | string | None, must be set | URL for the GCS bucket for results (must start with `gs`) | | config.syncTimeout | string | `"1m"` | Timeout for results from a sync cutout in Safir `parse_timedelta` format | | config.timeout | int | 600 (10 minutes) | Timeout for a single cutout job in seconds | +| config.updateSchema | bool | `false` | Whether to automatically update the vo-cutouts database schema | | cutoutWorker.affinity | object | `{}` | Affinity rules for the cutout worker pod | | cutoutWorker.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for cutout 
workers | | cutoutWorker.image.repository | string | `"ghcr.io/lsst-sqre/vo-cutouts-worker"` | Stack image to use for cutouts | diff --git a/applications/vo-cutouts/templates/configmap.yaml b/applications/vo-cutouts/templates/configmap.yaml index b933134f6b..8a0a3a4dc4 100644 --- a/applications/vo-cutouts/templates/configmap.yaml +++ b/applications/vo-cutouts/templates/configmap.yaml @@ -2,6 +2,12 @@ apiVersion: v1 kind: ConfigMap metadata: name: vo-cutouts + {{- if .Values.config.updateSchema }} + annotations: + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "before-hook-creation" + helm.sh/hook-weight: "0" + {{- end }} labels: {{- include "vo-cutouts.labels" . | nindent 4 }} data: diff --git a/applications/vo-cutouts/templates/job-schema-update.yaml b/applications/vo-cutouts/templates/job-schema-update.yaml new file mode 100644 index 0000000000..b59461e887 --- /dev/null +++ b/applications/vo-cutouts/templates/job-schema-update.yaml @@ -0,0 +1,130 @@ +{{- if .Values.config.updateSchema -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: "vo-cutouts-schema-update" + annotations: + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "hook-succeeded" + helm.sh/hook-weight: "1" + labels: + {{- include "vo-cutouts.labels" . | nindent 4 }} +spec: + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "vo-cutouts.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "schema-update" + vo-cutouts-redis-client: "true" + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.cloudsql.enabled }} + serviceAccountName: "vo-cutouts" + {{- else }} + automountServiceAccountToken: false + {{- end }} + containers: + {{- if .Values.cloudsql.enabled }} + - name: "cloud-sql-proxy" + # Running the sidecar as normal causes it to keep running and thus + # the Pod never exits, the Job never finishes, and the hook blocks + # the sync. Have the main pod signal the sidecar by writing to a + # file on a shared emptyDir file system, and use a simple watcher + # loop in shell in the sidecar container to terminate the proxy when + # the main container finishes. + # + # Based on https://stackoverflow.com/questions/41679364/ + command: + - "/bin/sh" + - "-c" + args: + - | + /cloud_sql_proxy -ip_address_types=PRIVATE -log_debug_stdout=true -structured_logs=true -instances={{ required "cloudsql.instanceConnectionName must be specified" .Values.cloudsql.instanceConnectionName }}=tcp:5432 & + PID=$! + while true; do + if [[ -f "/lifecycle/main-terminated" ]]; then + kill $PID + exit 0 + fi + sleep 1 + done + image: "{{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }}{{ .Values.cloudsql.image.schemaUpdateTagSuffix }}" + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy | quote }} + {{- with .Values.cloudsql.resources }} + resources: + {{- toYaml .
| nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + volumeMounts: + - name: "lifecycle" + mountPath: "/lifecycle" + {{- end }} + - name: "vo-cutouts" + command: + - "/bin/sh" + - "-c" + - | + vo-cutouts update-schema + touch /lifecycle/main-terminated + env: + - name: "CUTOUT_ARQ_QUEUE_PASSWORD" + valueFrom: + secretKeyRef: + name: "vo-cutouts" + key: "redis-password" + - name: "CUTOUT_DATABASE_PASSWORD" + valueFrom: + secretKeyRef: + name: "vo-cutouts" + key: "database-password" + envFrom: + - configMapRef: + name: "vo-cutouts" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + volumeMounts: + - name: "lifecycle" + mountPath: "/lifecycle" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + restartPolicy: "Never" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: "lifecycle" + emptyDir: {} +{{- end }} diff --git a/applications/vo-cutouts/templates/serviceaccount.yaml b/applications/vo-cutouts/templates/serviceaccount.yaml index dfa2303153..c2c43cfbbd 100644 --- a/applications/vo-cutouts/templates/serviceaccount.yaml +++ b/applications/vo-cutouts/templates/serviceaccount.yaml @@ -6,5 +6,10 @@ metadata: labels: {{- include "vo-cutouts.labels" . 
| nindent 4 }} annotations: + {{- if .Values.config.updateSchema }} + helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "before-hook-creation" + helm.sh/hook-weight: "0" + {{- end }} iam.gke.io/gcp-service-account: {{ required "config.serviceAccount must be set to a valid Google service account" .Values.config.serviceAccount | quote }} {{- end }} diff --git a/applications/vo-cutouts/values-idfint.yaml b/applications/vo-cutouts/values-idfint.yaml index b7e41291fd..9239f30c7d 100644 --- a/applications/vo-cutouts/values-idfint.yaml +++ b/applications/vo-cutouts/values-idfint.yaml @@ -1,6 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-int-dc5d.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-int-us-central1-output/" + updateSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values-idfprod.yaml b/applications/vo-cutouts/values-idfprod.yaml index 461cb96fe5..53657a6e3c 100644 --- a/applications/vo-cutouts/values-idfprod.yaml +++ b/applications/vo-cutouts/values-idfprod.yaml @@ -1,6 +1,7 @@ config: serviceAccount: "vo-cutouts@science-platform-stable-6994.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-stable-us-central1-output/" + updateSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index f6852a7f6c..17e9ad9ba5 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -40,6 +40,9 @@ config: # @default -- 600 (10 minutes) timeout: 600 + # -- Whether to automatically update the vo-cutouts database schema + updateSchema: false + image: # -- vo-cutouts image to use for the frontend and database workers repository: "ghcr.io/lsst-sqre/vo-cutouts" @@ -93,6 +96,9 @@ cloudsql: # -- Cloud SQL Auth Proxy tag to use tag: "1.37.0" + # -- Tag suffix to use for the proxy for schema updates + schemaUpdateTagSuffix: "-alpine" + # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 3e13b7fc98..1da63ab1f8 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -22,7 +22,7 @@ IVOA TAP service | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `cadc-tap` Kubernetes service accounts and has the `cloudsql.client` role, access | | config.backend | string | None, must be set to `pg` or `qserv` | What type of backend are we connecting to? 
| -| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/3.0.2/datalink-snippets.zip"` | Datalink payload URL | +| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip"` | Datalink payload URL | | config.gcsBucket | string | `"async-results.lsst.codes"` | Name of GCS bucket in which to store results | | config.gcsBucketType | string | `"GCS"` | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | `"https://tap-files.lsst.codes"` | Base URL for results stored in GCS bucket | @@ -31,7 +31,7 @@ IVOA TAP service | config.pg.host | string | None, must be set if backend is `pg` | Host to connect to | | config.pg.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP image | | config.pg.image.repository | string | `"ghcr.io/lsst-sqre/tap-postgres-service"` | TAP image to use | -| config.pg.image.tag | string | `"1.18.5"` | Tag of TAP image to use | +| config.pg.image.tag | string | `"1.18.6"` | Tag of TAP image to use | | config.pg.username | string | None, must be set if backend is `pg` | Username to connect with | | config.qserv.host | string | `"mock-db:3306"` (the mock QServ) | QServ hostname:port to connect to | | config.qserv.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP image | @@ -69,7 +69,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"3.0.2"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"v3.2.1"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | See `values.yaml` | Resource limits and requests for the TAP schema database pod | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 317a89e879..fd8b7e20ce 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -71,7 +71,7 @@ config: pullPolicy: "IfNotPresent" # -- Tag of TAP image to use - tag: "1.18.5" + tag: "1.18.6" qserv: # -- QServ hostname:port to connect to @@ -99,7 +99,7 @@ config: tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL - datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/3.0.2/datalink-snippets.zip" + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip" # -- Name of GCS bucket in which to store results gcsBucket: "async-results.lsst.codes" @@ -162,7 +162,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "3.0.2" + tag: "v3.2.1" # -- Resource limits and requests for the TAP schema database pod # @default -- See `values.yaml` diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index b046a4e24b..03390726d6 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -36,21 +36,22 @@ Event-driven processing of camera images | instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should 
be run for which visits' raws. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | | instrument.skymap | string | `""` | Skymap to use with the instrument | -| knative.cpuLimit | int | `1` | The maximum cpu cores. | -| knative.cpuRequest | int | `1` | The cpu cores requested. | -| knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). | -| knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). | +| knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | +| knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | +| knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | +| knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | knative.gpu | bool | `false` | GPUs enabled. | -| knative.gpuRequest | int | `0` | The number of GPUs to request. | +| knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | | knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | -| knative.memoryLimit | string | `"8Gi"` | The maximum memory limit. | -| knative.memoryRequest | string | `"2Gi"` | The minimum memory to request. | +| knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | +| knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | | knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, startup timeout is ignored. | | logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | | | podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | +| raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. 
| | registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index f6572190d2..f08eb4b17e 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -13,17 +13,6 @@ spec: spec: containerConcurrency: {{ .Values.containerConcurrency }} initContainers: - - name: init-pgpass - # Make a copy of the read-only secret that's owned by lsst - # lsst account is created by main image with id 1000 - image: busybox - command: ["sh", "-c", "cp -L /app/pg-mount/.pgpass /app/pgsql/ && chown 1000:1000 /app/pgsql/.pgpass && chmod u=r,go-rwx /app/pgsql/.pgpass"] - volumeMounts: - - mountPath: /app/pg-mount - name: pgpass-mount - readOnly: true - - mountPath: /app/pgsql - name: pgpass-credentials-file - name: init-db-auth # Make a copy of the read-only secret that's owned by lsst # lsst account is created by main image with id 1000 @@ -40,6 +29,8 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy | quote }} name: user-container env: + - name: WORKER_COUNT + value: {{ .Values.containerConcurrency | toString | quote }} - name: WORKER_RESTART_FREQ value: {{ .Values.worker.restart | toString | quote }} - name: WORKER_TIMEOUT @@ -47,7 +38,7 @@ spec: - name: WORKER_GRACE_PERIOD value: {{ .Values.worker.grace_period | toString | quote }} {{- /* Knative not configured for timeouts longer than 1200 seconds, and shouldn't need to be. */ -}} - {{- $knative_timeout := minf 1200 (addf (mulf 2 (coalesce .Values.worker.timeout 600)) .Values.knative.extraTimeout) }} + {{- $knative_timeout := min 1200 (add (mul 2 (coalesce .Values.worker.timeout 600)) .Values.knative.extraTimeout) }} - name: RUBIN_INSTRUMENT value: {{ .Values.instrument.name }} - name: PREPROCESSING_PIPELINES_CONFIG @@ -70,6 +61,8 @@ spec: value: {{ .Values.apdb.config }} - name: KAFKA_CLUSTER value: {{ .Values.imageNotifications.kafkaClusterAddress }} + - name: RAW_MICROSERVICE + value: {{ .Values.raw_microservice }} - name: SASQUATCH_URL value: {{ .Values.sasquatch.endpointUrl }} {{- if and .Values.sasquatch.endpointUrl .Values.sasquatch.auth_env }} @@ -99,8 +92,6 @@ spec: - name: AWS_SHARED_CREDENTIALS_FILE value: /app/s3/credentials {{- end }} - - name: PGPASSFILE - value: /app/pgsql/.pgpass - name: LSST_DB_AUTH value: /app/lsst-credentials/db-auth.yaml - name: AP_KAFKA_PRODUCER_PASSWORD @@ -129,9 +120,6 @@ spec: volumeMounts: - mountPath: /tmp-butler name: ephemeral - - mountPath: /app/pgsql - name: pgpass-credentials-file - readOnly: true - mountPath: /app/lsst-credentials name: db-auth-credentials-file readOnly: true @@ -162,17 +150,6 @@ spec: - name: ephemeral emptyDir: sizeLimit: {{ .Values.knative.ephemeralStorageLimit }} - - name: pgpass-mount - # Temporary mount for .pgpass; cannot be read directly because it's owned by root - secret: - secretName: {{ template "prompt-proto-service.fullname" . 
}}-secret - items: - - key: pgpass_file - path: .pgpass - defaultMode: 0400 # Minimal permissions, as extra protection - - name: pgpass-credentials-file - emptyDir: - sizeLimit: 10Ki # Just a text file! - name: db-auth-mount # Temporary mount for db-auth.yaml; cannot be read directly because it's owned by root secret: diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 8c4454ee27..954c50e7d1 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -75,6 +75,10 @@ s3: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 +# -- The URI to a microservice that maps image metadata to a file location. +# If empty, Prompt Processing does not use a microservice. +raw_microservice: "" + imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -120,21 +124,23 @@ sasquatch: auth_env: true knative: - # -- The cpu cores requested. + # -- The cpu cores requested for the full pod (see `containerConcurrency`). cpuRequest: 1 - # -- The maximum cpu cores. + # -- The maximum cpu cores for the full pod (see `containerConcurrency`). cpuLimit: 1 # -- The storage space reserved for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageRequest: "5Gi" # -- The maximum storage space allowed for each container (mostly local Butler). + # This allocation is for the full pod (see `containerConcurrency`) ephemeralStorageLimit: "5Gi" # -- GPUs enabled. gpu: false - # -- The number of GPUs to request. + # -- The number of GPUs to request for the full pod (see `containerConcurrency`). gpuRequest: 0 - # -- The minimum memory to request. + # -- The minimum memory to request for the full pod (see `containerConcurrency`). memoryRequest: "2Gi" - # -- The maximum memory limit. + # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. # This parameter adds extra time to that minimum (seconds). diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 5383f06074..cb50958d6d 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -81,3 +81,17 @@ Even when you want to be prompted. **Solution:** Have the user go to `https://cilogin.org/me `__ and choose "Delete ALL". This will clear their remembered selection. They can they retry whatever operation they were attempting. + +User deleted from COmanage and needs to be restored +=================================================== + +**Symptoms**: In a Phalanx environment that uses CILogon and COmanage, a user was deleted from COmanage, possibly because their identity record or authentication configuration was irrevocably broken. +The user needs to be reinstated with their previously existing files. + +**Solution**: The user should create their account again and choose the same username that they used previously. +This will assign them the same UID and GID that they had previously. +Currently, we don't delete files for deleted users, so all of their files should still be intact. + +UID and GID for users is tracked in Google Filestore and is assigned solely based on the user's username. +Any user in the environment with the same username will get the same UID and GID, and UIDs and GIDs are never reused. 
+Therefore, the same UID and GID can be retained by keeping the same username. diff --git a/docs/applications/gafaelfawr/add-oidc-client.rst b/docs/applications/gafaelfawr/add-oidc-client.rst new file mode 100644 index 0000000000..e981115f8d --- /dev/null +++ b/docs/applications/gafaelfawr/add-oidc-client.rst @@ -0,0 +1,102 @@ +############################# +Add new OpenID Connect client +############################# + +Gafaelfawr can also serve as an OpenID Connect server, allowing third-party applications running inside Phalanx and OpenID Connect clients outside of Phalanx environments to authenticate users in the same way that the Science Platform does. + +Each OpenID Connect client of Gafaelfawr must be pre-registered and assigned a ``client_id`` and password. +To complete an authentication, the client must authenticate with that ``client_id`` and password. +See `the Gafaelfawr documentation `__. + +This page describes how to register a new client of Gafaelfawr. +You will need the following information: + +* The Phalanx environment to which you'll be adding the new client. +* A short, human-readable name of the new client you're adding. +* The return URL to which the user will be sent after authentication. + +.. note:: + + The instructions here are specific to SQuaRE-managed Phalanx environments. + For other environments, you can update the ``oidc-server-secrets`` Gafaelfawr secret key however you maintain static secrets. + +Add secret +========== + +OpenID Connect clients are configured in the ``oidc-server-secrets`` key of the ``gafaelfawr`` secret. +The value of this key is, unfortunately, a JSON representation of all of the clients. +We currently maintain two parallel records of the clients, one in a structured 1Password secret that is not currently used, and separately in the ``gafaelfawr`` secret. +The goal is to eventually add automation to Phalanx to generate the latter from the former. + +#. Open 1Password. + Go to the 1Password vault for static secrets for the Phalanx environment where you want to add an OpenID Connect client. + +#. Create or edit an item named ``oidc-clients``. + If it doesn't already exist, create it as an item of type :menuselection:`Server`. + +#. Add a new section for the new client. + Set the section title to a short, human-readable name for the OpenID Connect client. + This name should be enough to tell someone looking at this secret what this client is used for. + +#. Add a text field to the new section. + Change the label to ``id``. + Change the contents to :samp:`{random-id}.clients.{fqdn}` where the random ID is the results of ``os.urandom(16).hex()`` in Python and the FQDN is the FQDN of the environment. + For example, ``de5dd2c1fbf648e11d50b6cf3aa72277.clients.data.lsst.cloud``. + +#. Add a password field to the new section, changing the label as ``secret``. + You can let 1Password generate a random 20-character password if you want, or generate one of equivalent entropy however you choose. + +#. Add a final text field to the new section. + Change the label to ``return_uri``. + Set the value to the return URL of the client. + This should be provided by the OpenID Connect client and will be the URL to which the user is sent after authentication. + +#. Now, you will need to copy this data into the ``gafaelfawr`` secret under the ``oidc-server-secrets`` key, creating that key if it doesn't already exist. + Unfortunately, you currently have to construct the JSON by hand. 
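Because this JSON is assembled by hand, and a syntax error will keep Gafaelfawr from starting, it can help to generate it with a few lines of Python instead of editing it directly. The following is only a sketch of the structure described in the next sentence; the FQDN and return URL are placeholders, and the random values are examples of the conventions above, not real credentials.

```python
# Sketch: assemble the JSON value for the oidc-server-secrets key. The id
# follows the {random-id}.clients.{fqdn} convention from the instructions
# above; token_urlsafe(20) is just one way to get entropy comparable to a
# generated 20-character password. All concrete values are placeholders.
import json
import os
import secrets

fqdn = "data.lsst.cloud"  # example environment FQDN
new_client = {
    "id": f"{os.urandom(16).hex()}.clients.{fqdn}",
    "secret": secrets.token_urlsafe(20),
    "return_uri": "https://example.org/login",  # supplied by the client
}

# Include every existing client, not just the new one being added.
existing_clients: list[dict[str, str]] = []  # paste the current list here
print(json.dumps([*existing_clients, new_client]))
```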
+ The value of this key should be a JSON-encoded list of objects, and each object should have keys ``id``, ``secret``, and ``return_uri`` with the information above. + Be sure to include all the clients, not just the new one that you're adding. + +Share the secret with the client +================================ + +You now need to convey the ``client_id`` (the ``id`` value above) and the ``client_secret`` (the ``secret`` value above) to the OpenID Connect client. +They will need to configure their client software to use that ``client_id`` and ``client_secret`` whenever performing an OpenID Connect authentication. + +The easiest way to do this is often to create a separate 1Password secret and share it with the client. + +.. warning:: + + **DO NOT SHARE THE SECRETS CREATED ABOVE.** + The client should not have access to the ``oidc-clients`` or ``gafaelfawr`` secrets. + +#. Go to the SQuaRE vault and create a new secret. + Use a name like ``Gafaelfawr OIDC``, replacing ```` with a *short* human-readable name for the client. + Use the :menuselection:`Server` item type. + +#. Add the information above. + It's best to call the fields ``client_id``, ``client_secret``, and ``return_uri``, since those are the field names in the OpenID Connect standard and therefore what is usually used in software documentation. + Enter the same information as above. + +When sharing with someone who is managing multiple related clients, feel free to put all of the secrets in the same 1Password item in separate sections. + +Now, you can create a one-time 1Password link for this secret and share it with the user in Slack or via email. + +Configure Gafaelfawr +==================== + +If this is the first OpenID Connect client for Gafaelfawr, you will need to enable OpenID Connect server support. +Do this by setting ``config.oidcServer.enabled`` to true in the Gafaelfawr :file:`values-{environment}.yaml` file. +See `the Gafaelfawr documentation `__ for more details. + +If the purpose of this OpenID Connect client is to provide services to an IDAC or another external client that may need data rights information (see :dmtn:`253`), ensure the configuration of the Gafaelfawr OpenID Connect server is correct and has a ``dataRightsMapping`` setting. +See `the Gafaelfawr documentation `__ for more information. + +Then, whether or not you needed to make configuration changes, you will need to sync secrets for this environment. +Follow the normal process (:doc:`/admin/sync-secrets`) to do that. + +Finally, you will need to restart Gafaelfawr to pick up the new secret. +Do this by selecting :menuselection:`Restart` on the deployment in Argo CD (see :ref:`branch-deploy-restart`). + +.. note:: + + Since this requires a Gafaelfawr restart, and since you are changing a secret that contains manually-formatted JSON that is prone to syntax errors that will prevent Gafaelfawr from starting, you will normally want to do this during a maintenance window for a production environment. diff --git a/docs/applications/gafaelfawr/index.rst b/docs/applications/gafaelfawr/index.rst index 93546861eb..f921f7e1f2 100644 --- a/docs/applications/gafaelfawr/index.rst +++ b/docs/applications/gafaelfawr/index.rst @@ -8,7 +8,7 @@ Gafaelfawr provides authentication and identity management services for the Rubi It is primarily used as an NGINX ``auth_request`` handler configured via annotations on the ``Ingress`` resources of Science Platform services. 
In that role, it requires a user have the required access scope to use that service, rejects users who do not have that scope, and redirects users who are not authenticated to the authentication process. -Gafaelfawr supports authentication via either OpenID Connect (often through CILogon_ or GitHub). +Gafaelfawr supports authentication via either OpenID Connect (often through CILogon_) or GitHub. Gafaelfawr also provides a token management API and (currently) UI for users of the Science Platform. @@ -24,6 +24,7 @@ Guides bootstrap manage-schema recreate-token + add-oidc-client github-organizations troubleshoot values diff --git a/docs/applications/ghostwriter/index.rst b/docs/applications/ghostwriter/index.rst new file mode 100644 index 0000000000..fc1adf2cc9 --- /dev/null +++ b/docs/applications/ghostwriter/index.rst @@ -0,0 +1,16 @@ +.. px-app:: ghostwriter + +####################################### +ghostwriter — URL rewriter/personalizer +####################################### + +.. jinja:: ghostwriter + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/ghostwriter/values.md b/docs/applications/ghostwriter/values.md new file mode 100644 index 0000000000..0f3a1e3f75 --- /dev/null +++ b/docs/applications/ghostwriter/values.md @@ -0,0 +1,12 @@ +```{px-app-values} ghostwriter +``` + +# ghostwriter Helm values reference + +Helm values reference table for the {px-app}`ghostwriter` application. + +```{include} ../../../applications/ghostwriter/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/infrastructure.rst b/docs/applications/infrastructure.rst index 158629e5e5..bb614908d1 100644 --- a/docs/applications/infrastructure.rst +++ b/docs/applications/infrastructure.rst @@ -12,6 +12,7 @@ Argo CD project: ``infrastructure`` argocd/index cert-manager/index + ghostwriter/index ingress-nginx/index gafaelfawr/index mobu/index diff --git a/docs/applications/ppdb-replication/index.rst b/docs/applications/ppdb-replication/index.rst new file mode 100644 index 0000000000..ea26aae83a --- /dev/null +++ b/docs/applications/ppdb-replication/index.rst @@ -0,0 +1,19 @@ +.. px-app:: ppdb-replication + +############################################################ +ppdb-replication — Replicates data from the APDB to the PPDB +############################################################ + +The ``ppdb-replication`` application periodically replicates data from the +Alert Production DataBase (APDB) to the Prompt Products DataBase (PPDB). + +.. jinja:: ppdb-replication + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/ppdb-replication/values.md b/docs/applications/ppdb-replication/values.md new file mode 100644 index 0000000000..425e7f6fd2 --- /dev/null +++ b/docs/applications/ppdb-replication/values.md @@ -0,0 +1,12 @@ +```{px-app-values} ppdb-replication +``` + +# ppdb-replication Helm values reference + +Helm values reference table for the {px-app}`ppdb-replication` application. 
+ +```{include} ../../../applications/ppdb-replication/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/rapid-analysis/index.rst b/docs/applications/rapid-analysis/index.rst new file mode 100644 index 0000000000..fe3d904d0f --- /dev/null +++ b/docs/applications/rapid-analysis/index.rst @@ -0,0 +1,29 @@ +.. px-app:: rapid-analysis + +################################################# +rapid-analysis — Real-time backend of the RubinTV +################################################# + +The Rapid Analysis Framework performs realtime analysis on data from these sources, rendering the outputs destined for RubinTV as PNGs, JPEGs, MP4s, and JSON files, which are put in S3 buckets at the summit and at USDF. +The RubinTV frontend then monitors these buckets and serves these files to users. + +At the summit, the real-time activities currently include: + +.. rst-class:: compact + +- AuxTel observing +- ComCam testing +- All sky camera observations +- StarTracker data taking on the TMA +- TMA testing activities + +.. jinja:: rapid-analysis + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/rapid-analysis/values.md b/docs/applications/rapid-analysis/values.md new file mode 100644 index 0000000000..157ad4ca2c --- /dev/null +++ b/docs/applications/rapid-analysis/values.md @@ -0,0 +1,12 @@ +```{px-app-values} rapid-analysis +``` + +# rapid-analysis Helm values reference + +Helm values reference table for the {px-app}`rapid-analysis` application. + +```{include} ../../../applications/rapid-analysis/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/roundtable.rst b/docs/applications/roundtable.rst index df4f559bbd..8d3ecce818 100644 --- a/docs/applications/roundtable.rst +++ b/docs/applications/roundtable.rst @@ -19,6 +19,7 @@ Argo CD project: ``roundtable`` ook/index sqrbot-sr/index squarebot/index + templatebot/index unfurlbot/index vault/index diff --git a/docs/applications/rsp.rst b/docs/applications/rsp.rst index 9c631d37af..b395276738 100644 --- a/docs/applications/rsp.rst +++ b/docs/applications/rsp.rst @@ -18,6 +18,7 @@ Argo CD project: ``rsp`` noteburst/index nublado/index portal/index + ppdb-replication/index semaphore/index siav2/index sqlproxy-cross-project/index diff --git a/docs/applications/rubin.rst b/docs/applications/rubin.rst index ec91711ff5..483f483438 100644 --- a/docs/applications/rubin.rst +++ b/docs/applications/rubin.rst @@ -18,7 +18,9 @@ Argo CD project: ``rubin`` nightreport/index obsloctap/index plot-navigator/index + ppdb-replication/index production-tools/index + rapid-analysis/index rubintv/index rubintv-dev/index schedview-snapshot/index diff --git a/docs/applications/templatebot/index.rst b/docs/applications/templatebot/index.rst new file mode 100644 index 0000000000..9b2f2ce3a4 --- /dev/null +++ b/docs/applications/templatebot/index.rst @@ -0,0 +1,16 @@ +.. px-app:: templatebot + +################################# +templatebot — Create new projects +################################# + +.. jinja:: templatebot + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/templatebot/values.md b/docs/applications/templatebot/values.md new file mode 100644 index 0000000000..ad83245bf4 --- /dev/null +++ b/docs/applications/templatebot/values.md @@ -0,0 +1,12 @@ +```{px-app-values} templatebot +``` + +# templatebot Helm values reference + +Helm values reference table for the {px-app}`templatebot` application. + +```{include} ../../../applications/templatebot/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/developers/helm-chart/define-secrets.rst b/docs/developers/helm-chart/define-secrets.rst index ac904f1f80..ad68a7f831 100644 --- a/docs/developers/helm-chart/define-secrets.rst +++ b/docs/developers/helm-chart/define-secrets.rst @@ -136,7 +136,7 @@ A typical ``VaultSecret`` Helm template for an application looks like this (repl apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: {{ include "myapp.fullname" . }} + name: "myapp" labels: {{- include "myapp.labels" . | nindent 4 }} spec: diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index 72e40a18e8..d83a2d6051 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -5,7 +5,7 @@ * - Root domain - `{{ env.fqdn }} `__ * - Identity provider - - {{ env.gafaelfawr.provider.value }}{% if env.gafaelfawr.provider_hostname %} ({{ env.gafaelfawr.provider_hostname }}){% endif %} + - {{ env.gafaelfawr.provider.value }}{% if env.gafaelfawr.provider_hostname %} ({{ env.gafaelfawr.provider_hostname }}){% endif %}{% if env.gafaelfawr.comanage_hostname %} (COmanage: `{{ env.gafaelfawr.comanage_hostname }} `__){% endif %} {%- if env.argocd.url %} * - Argo CD - {{ env.argocd.url }} diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json index 415c78af8a..638d86b804 100644 --- a/docs/extras/schemas/secrets.json +++ b/docs/extras/schemas/secrets.json @@ -60,11 +60,7 @@ "title": "Condition" }, "onepassword": { - "allOf": [ - { - "$ref": "#/$defs/SecretOnepasswordConfig" - } - ], + "$ref": "#/$defs/SecretOnepasswordConfig", "description": "Configuration for how the secret is stored in 1Password", "title": "1Password configuration" }, diff --git a/environments/README.md b/environments/README.md index 100d733ea5..d44f6c7e2d 100644 --- a/environments/README.md +++ b/environments/README.md @@ -21,6 +21,7 @@ | applications.fastapi-bootcamp | bool | `false` | Enable the fastapi-bootcamp application | | applications.filestore-backup | bool | `false` | Enable the filestore-backup application | | applications.gafaelfawr | bool | `true` | Enable the Gafaelfawr application. This is required by Phalanx since most other applications use `GafaelfawrIngress` | +| applications.ghostwriter | bool | `false` | Enable the ghostwriter application | | applications.giftless | bool | `false` | Enable the giftless application | | applications.hips | bool | `false` | Enable the HiPS application | | applications.ingress-nginx | bool | `true` | Enable the ingress-nginx application. This is required for all environments, but is still configurable because currently USDF uses an unsupported configuration with ingress-nginx deployed in a different cluster. 
| @@ -42,6 +43,7 @@ | applications.plot-navigator | bool | `false` | Enable the plot-navigator application | | applications.portal | bool | `false` | Enable the portal application | | applications.postgres | bool | `false` | Enable the in-cluster PostgreSQL server. Use of this server is discouraged in favor of using infrastructure SQL, but will remain supported for use cases such as minikube test deployments. | +| applications.ppdb-replication | bool | `false` | Enable the ppdb-replication application | | applications.production-tools | bool | `false` | Enable the production-tools application | | applications.prompt-proto-service-hsc | bool | `false` | Enable the prompt-proto-service-hsc application | | applications.prompt-proto-service-hsc-gpu | bool | `false` | Enable the prompt-proto-service-hsc-gpu application | @@ -67,6 +69,7 @@ | applications.tap | bool | `false` | Enable the tap application | | applications.telegraf | bool | `false` | Enable the telegraf application | | applications.telegraf-ds | bool | `false` | Enable the telegraf-ds application | +| applications.templatebot | bool | `false` | Enable the templatebot application | | applications.times-square | bool | `false` | Enable the times-square application | | applications.unfurlbot | bool | `false` | Enable the unfurlbot application | | applications.uws | bool | `false` | Enable the uws application. This includes the dmocps control system application. | diff --git a/environments/templates/applications/infrastructure/ghostwriter.yaml b/environments/templates/applications/infrastructure/ghostwriter.yaml new file mode 100644 index 0000000000..5d993e0b88 --- /dev/null +++ b/environments/templates/applications/infrastructure/ghostwriter.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "ghostwriter") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "ghostwriter" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "ghostwriter" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "ghostwriter" + server: "https://kubernetes.default.svc" + project: "infrastructure" + source: + path: "applications/ghostwriter" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/templates/applications/roundtable/templatebot.yaml b/environments/templates/applications/roundtable/templatebot.yaml new file mode 100644 index 0000000000..f0f34810ce --- /dev/null +++ b/environments/templates/applications/roundtable/templatebot.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "templatebot") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "templatebot" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "templatebot" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "templatebot" + server: "https://kubernetes.default.svc" + project: "roundtable" + source: + path: "applications/templatebot" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ 
.Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/templates/applications/rubin/ppdb-replication.yaml b/environments/templates/applications/rubin/ppdb-replication.yaml new file mode 100644 index 0000000000..e9685feb11 --- /dev/null +++ b/environments/templates/applications/rubin/ppdb-replication.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "ppdb-replication") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "ppdb-replication" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "ppdb-replication" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "ppdb-replication" + server: "https://kubernetes.default.svc" + project: "rubin" + source: + path: "applications/ppdb-replication" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/templates/applications/rubin/rapid-analysis.yaml b/environments/templates/applications/rubin/rapid-analysis.yaml new file mode 100644 index 0000000000..8af8557264 --- /dev/null +++ b/environments/templates/applications/rubin/rapid-analysis.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "rapid-analysis") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "rapid-analysis" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "rapid-analysis" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "rapid-analysis" + server: "https://kubernetes.default.svc" + project: "rubin" + source: + path: "applications/rapid-analysis" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/values-base.yaml b/environments/values-base.yaml index b5d2a8c5ac..e0a262f932 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -34,6 +34,6 @@ applications: uws: true controlSystem: - imageTag: "k0001" + imageTag: "k0002" siteTag: "base" s3EndpointUrl: "https://s3.ls.lsst.org" diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index c9b76b5e85..6283dddfbd 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -16,6 +16,7 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true jira-data-proxy: true mobu: true @@ -24,11 +25,12 @@ applications: portal: true sasquatch: true semaphore: true - siav2: true + siav2: false ssotap: true squareone: true sqlproxy-cross-project: true strimzi: true + strimzi-access-operator: true tap: true 
telegraf: true telegraf-ds: true diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 27a63ea967..34696fc711 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -16,13 +16,14 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true mobu: true nublado: true plot-navigator: true portal: true sasquatch: true - siav2: true + siav2: false ssotap: true production-tools: true sasquatch-backpack: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 611286d511..0a6a26cc37 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -17,12 +17,13 @@ applications: butler: true datalinker: true filestore-backup: true + ghostwriter: true hips: true mobu: true nublado: true portal: true semaphore: true - siav2: true + siav2: false squareone: true ssotap: true tap: true diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index 8759014a8d..444f3bd295 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -11,3 +11,4 @@ applications: postgres: true squareone: true tap: true + ssotap: true diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 1ff4738824..a11686b579 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -27,5 +27,6 @@ applications: strimzi-access-operator: true telegraf: true telegraf-ds: true + templatebot: true unfurlbot: true vault: true diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 1077d688a2..ce63e3bd3f 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -14,6 +14,7 @@ applications: nightreport: true nublado: true portal: true + rapid-analysis: true rubintv: true rubintv-dev: true sasquatch: true diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 0bd875b947..6554e28ab3 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -11,6 +11,7 @@ namespaceLabels: applications: argo-workflows: true + consdb: true exposurelog: true mobu: true narrativelog: true @@ -18,6 +19,8 @@ applications: nublado: true obsenv-management: true portal: true + rapid-analysis: true + rubintv: true sasquatch: true squareone: true strimzi: true diff --git a/environments/values-usdf-cm-dev.yaml b/environments/values-usdf-cm-dev.yaml index 365566c1f2..79573c97b2 100644 --- a/environments/values-usdf-cm-dev.yaml +++ b/environments/values-usdf-cm-dev.yaml @@ -8,5 +8,8 @@ applications: # This environment uses an ingress managed in a separate Kubernetes cluster, # despite that configuration not being officially supported by Phalanx. 
cert-manager: false - gafaelfawr: false + gafaelfawr: true ingress-nginx: false + + cm-service: true + postgres: true diff --git a/environments/values-usdf-cm.yaml b/environments/values-usdf-cm.yaml index 3eac3a6f9d..2fb30966d6 100644 --- a/environments/values-usdf-cm.yaml +++ b/environments/values-usdf-cm.yaml @@ -10,5 +10,3 @@ applications: cert-manager: false gafaelfawr: false ingress-nginx: false - - cm-service: true diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index af80333545..d186cba8d2 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -20,12 +20,14 @@ applications: livetap: true mobu: true narrativelog: true + nightreport: true noteburst: true nublado: true obsloctap: true plot-navigator: true portal: true postgres: true + ppdb-replication: true rubintv: true sasquatch: true schedview-snapshot: true diff --git a/environments/values-usdfprod-prompt-processing.yaml b/environments/values-usdfprod-prompt-processing.yaml index 7ec0cb921e..b1c1ce92d9 100644 --- a/environments/values-usdfprod-prompt-processing.yaml +++ b/environments/values-usdfprod-prompt-processing.yaml @@ -12,5 +12,5 @@ applications: prompt-proto-service-latiss: true prompt-proto-service-lsstcam: false prompt-proto-service-lsstcomcam: false - prompt-proto-service-lsstcomcamsim: true + prompt-proto-service-lsstcomcamsim: false vault-secrets-operator: false diff --git a/environments/values.yaml b/environments/values.yaml index e613ee06f5..cd11a31959 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -82,6 +82,9 @@ applications: # most other applications use `GafaelfawrIngress` gafaelfawr: true + # -- Enable the ghostwriter application + ghostwriter: false + # -- Enable the giftless application giftless: false @@ -150,6 +153,9 @@ applications: # supported for use cases such as minikube test deployments. postgres: false + # -- Enable the ppdb-replication application + ppdb-replication: false + # -- Enable the rubintv application rubintv: false @@ -225,6 +231,9 @@ applications: # -- Enable the telegraf-ds application telegraf-ds: false + # -- Enable the templatebot application + templatebot: false + # -- Enable the times-square application times-square: false diff --git a/pyproject.toml b/pyproject.toml index 3b5c56bdb4..c503a3df13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,18 @@ classifiers = [ "Operating System :: POSIX", ] requires-python = ">=3.11" +dependencies = [ + "bcrypt", + "click", + "cryptography", + "GitPython", + "hvac", + "jinja2", + "onepasswordconnectsdk", + "pydantic>2", + "PyYAML", + "safir>5", +] [project.scripts] phalanx = "phalanx.cli:main" @@ -41,22 +53,7 @@ build-backend = "setuptools.build_meta" [tool.black] line-length = 79 -target-version = ["py311"] -exclude = ''' -/( - \.eggs - | \.git - | \.mypy_cache - | \.ruff_cache - | \.tox - | \.venv - | _build - | build - | dist -)/ -''' -# Use single-quoted strings so TOML treats the string like a Python r-string -# Multi-line strings are implicitly treated by black as regular expressions +target-version = ["py312"] [tool.coverage.run] parallel = true diff --git a/requirements/dev.in b/requirements/dev.in index de327ec14d..00f15c1027 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -21,8 +21,3 @@ documenteer[guide]>1 sphinx-click sphinx-diagrams sphinx-jinja - -# Greenlet is a SQLAlchemy dependency on x86_64 but not on macOS, so we need -# to explicitly include it. 
Otherwise, if dependencies are rebuilt on macOS, -# dependency installation will fail on all other platforms. -greenlet diff --git a/requirements/dev.txt b/requirements/dev.txt index d1d038b509..a8d900ddd8 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,5 +1,5 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/dev.txt requirements/dev.in +# uv pip compile --universal --generate-hashes --output-file requirements/dev.txt requirements/dev.in alabaster==1.0.0 \ --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b @@ -10,6 +10,10 @@ annotated-types==0.7.0 \ # via # -c requirements/main.txt # pydantic +appnope==0.1.4 ; platform_system == 'Darwin' \ + --hash=sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee \ + --hash=sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c + # via ipykernel asttokens==2.4.1 \ --hash=sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24 \ --hash=sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0 @@ -32,13 +36,84 @@ beautifulsoup4==4.12.3 \ --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed # via pydata-sphinx-theme -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.8.30 \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 # via # -c requirements/main.txt # requests # sphinx-prompt +cffi==1.17.1 ; implementation_name == 'pypy' \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + 
--hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + 
--hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b + # via + # -c requirements/main.txt + # pyzmq charset-normalizer==3.3.2 \ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ @@ -141,6 +216,15 @@ click==8.1.7 \ # documenteer # jupyter-cache # sphinx-click +colorama==0.4.6 ; sys_platform == 'win32' or platform_system == 'Windows' \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via + # -c requirements/main.txt + # click + # ipython + # pytest + # sphinx comm==0.2.2 \ --hash=sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e \ --hash=sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3 @@ -270,9 +354,9 @@ docutils==0.21.2 \ # sphinx-jinja # sphinx-prompt # sphinxcontrib-bibtex -executing==2.0.1 \ - --hash=sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147 \ - --hash=sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc +executing==2.1.0 \ + --hash=sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf \ + --hash=sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab # via stack-data fastjsonschema==2.20.0 \ --hash=sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23 \ @@ -294,71 +378,84 @@ graphviz==0.20.3 \ --hash=sha256:09d6bc81e6a9fa392e7ba52135a9d49f1ed62526f96499325930e87ca1b5925d \ --hash=sha256:81f848f2904515d8cd359cc611faba817598d2feaac4027b266aa3eda7b3dde5 # via diagrams -greenlet==3.0.3 \ - --hash=sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67 \ - --hash=sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6 \ - --hash=sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257 \ - --hash=sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4 \ - --hash=sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676 \ - --hash=sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61 \ - --hash=sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc \ - --hash=sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca \ - --hash=sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7 \ - --hash=sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728 \ - --hash=sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305 \ - --hash=sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6 \ - --hash=sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379 \ - --hash=sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414 \ - --hash=sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04 \ - --hash=sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a \ - --hash=sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf \ - 
--hash=sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491 \ - --hash=sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559 \ - --hash=sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e \ - --hash=sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274 \ - --hash=sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb \ - --hash=sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b \ - --hash=sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9 \ - --hash=sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b \ - --hash=sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be \ - --hash=sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506 \ - --hash=sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405 \ - --hash=sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113 \ - --hash=sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f \ - --hash=sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5 \ - --hash=sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230 \ - --hash=sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d \ - --hash=sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f \ - --hash=sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a \ - --hash=sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e \ - --hash=sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61 \ - --hash=sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6 \ - --hash=sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d \ - --hash=sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71 \ - --hash=sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22 \ - --hash=sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2 \ - --hash=sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3 \ - --hash=sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067 \ - --hash=sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc \ - --hash=sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881 \ - --hash=sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3 \ - --hash=sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e \ - --hash=sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac \ - --hash=sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53 \ - --hash=sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0 \ - --hash=sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b \ - --hash=sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83 \ - --hash=sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41 \ - --hash=sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c \ - --hash=sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf \ - --hash=sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da \ - --hash=sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33 - # via - # -r requirements/dev.in - # sqlalchemy -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - 
--hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 +greenlet==3.1.1 ; (python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 'aarch64') or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64') \ + --hash=sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e \ + --hash=sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7 \ + --hash=sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01 \ + --hash=sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1 \ + --hash=sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159 \ + --hash=sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563 \ + --hash=sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83 \ + --hash=sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9 \ + --hash=sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395 \ + --hash=sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa \ + --hash=sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942 \ + --hash=sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1 \ + --hash=sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441 \ + --hash=sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22 \ + --hash=sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9 \ + --hash=sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0 \ + --hash=sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba \ + --hash=sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3 \ + --hash=sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1 \ + --hash=sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6 \ + --hash=sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291 \ + --hash=sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39 \ + --hash=sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d \ + --hash=sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467 \ + --hash=sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475 \ + --hash=sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef \ + --hash=sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c \ + --hash=sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511 \ + --hash=sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c \ + --hash=sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822 \ + --hash=sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a \ + --hash=sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8 \ + --hash=sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d \ + --hash=sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01 \ + --hash=sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145 \ + --hash=sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80 \ + 
--hash=sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13 \ + --hash=sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e \ + --hash=sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b \ + --hash=sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1 \ + --hash=sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef \ + --hash=sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc \ + --hash=sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff \ + --hash=sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120 \ + --hash=sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437 \ + --hash=sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd \ + --hash=sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981 \ + --hash=sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36 \ + --hash=sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a \ + --hash=sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798 \ + --hash=sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7 \ + --hash=sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761 \ + --hash=sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0 \ + --hash=sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e \ + --hash=sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af \ + --hash=sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa \ + --hash=sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c \ + --hash=sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42 \ + --hash=sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e \ + --hash=sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81 \ + --hash=sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e \ + --hash=sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617 \ + --hash=sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc \ + --hash=sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de \ + --hash=sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111 \ + --hash=sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383 \ + --hash=sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70 \ + --hash=sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6 \ + --hash=sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4 \ + --hash=sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011 \ + --hash=sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803 \ + --hash=sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79 \ + --hash=sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f + # via sqlalchemy +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via # -c requirements/main.txt # requests @@ -367,9 +464,9 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -importlib-metadata==8.2.0 \ - 
--hash=sha256:11901fa0c2f97919b288679932bb64febaeacf289d18ac84dd68cb2e74213369 \ - --hash=sha256:72e8d4399996132204f9a16dcc751af254a48f8d1b20b9ff0f98d4a8f901e73d +importlib-metadata==8.5.0 \ + --hash=sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b \ + --hash=sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7 # via # jupyter-cache # myst-nb @@ -381,9 +478,9 @@ ipykernel==6.29.5 \ --hash=sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5 \ --hash=sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215 # via myst-nb -ipython==8.26.0 \ - --hash=sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c \ - --hash=sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff +ipython==8.27.0 \ + --hash=sha256:0b99a2dc9f15fd68692e898e5568725c6d49c527d36a9fb5960ffbdeaa82ff7e \ + --hash=sha256:f68b3cb8bde357a5d7adc9598d57e22a45dfbea19eb6b98286fa3b288c9cd55c # via # ipykernel # myst-nb @@ -415,9 +512,9 @@ jupyter-cache==1.0.0 \ --hash=sha256:594b1c4e29b488b36547e12477645f489dbdc62cc939b2408df5679f79245078 \ --hash=sha256:d0fa7d7533cd5798198d8889318269a8c1382ed3b22f622c09a9356521f48687 # via myst-nb -jupyter-client==8.6.2 \ - --hash=sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df \ - --hash=sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f +jupyter-client==8.6.3 \ + --hash=sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419 \ + --hash=sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f # via # ipykernel # nbclient @@ -514,42 +611,42 @@ matplotlib-inline==0.1.7 \ # via # ipykernel # ipython -mdit-py-plugins==0.4.1 \ - --hash=sha256:1020dfe4e6bfc2c79fb49ae4e3f5b297f5ccd20f010187acc52af2921e27dc6a \ - --hash=sha256:834b8ac23d1cd60cec703646ffd22ae97b7955a6d596eb1d304be1e251ae499c +mdit-py-plugins==0.4.2 \ + --hash=sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636 \ + --hash=sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5 # via myst-parser mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.11.1 \ - --hash=sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54 \ - --hash=sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a \ - --hash=sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72 \ - --hash=sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69 \ - --hash=sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b \ - --hash=sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe \ - --hash=sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4 \ - --hash=sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd \ - --hash=sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0 \ - --hash=sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525 \ - --hash=sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2 \ - --hash=sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c \ - --hash=sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5 \ - --hash=sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de \ - --hash=sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74 \ - 
--hash=sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c \ - --hash=sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e \ - --hash=sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58 \ - --hash=sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b \ - --hash=sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417 \ - --hash=sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411 \ - --hash=sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb \ - --hash=sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03 \ - --hash=sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca \ - --hash=sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8 \ - --hash=sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08 \ - --hash=sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809 +mypy==1.11.2 \ + --hash=sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36 \ + --hash=sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce \ + --hash=sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6 \ + --hash=sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b \ + --hash=sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca \ + --hash=sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24 \ + --hash=sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383 \ + --hash=sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7 \ + --hash=sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86 \ + --hash=sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d \ + --hash=sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4 \ + --hash=sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8 \ + --hash=sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987 \ + --hash=sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385 \ + --hash=sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79 \ + --hash=sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef \ + --hash=sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6 \ + --hash=sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70 \ + --hash=sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca \ + --hash=sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70 \ + --hash=sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12 \ + --hash=sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104 \ + --hash=sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a \ + --hash=sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318 \ + --hash=sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1 \ + --hash=sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b \ + --hash=sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -595,13 +692,13 @@ parso==0.8.4 \ --hash=sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18 \ 
--hash=sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d # via jedi -pexpect==4.9.0 \ +pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523 \ --hash=sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f # via ipython -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via jupyter-core pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ @@ -630,7 +727,7 @@ psutil==6.0.0 \ --hash=sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14 \ --hash=sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0 # via ipykernel -ptyprocess==0.7.0 \ +ptyprocess==0.7.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35 \ --hash=sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220 # via pexpect @@ -648,110 +745,116 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.8.2 \ - --hash=sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a \ - --hash=sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8 +pycparser==2.22 ; implementation_name == 'pypy' \ + --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ + --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc + # via + # -c requirements/main.txt + # cffi +pydantic==2.9.2 \ + --hash=sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f \ + --hash=sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12 # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.20.1 \ - --hash=sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d \ - --hash=sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f \ - --hash=sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686 \ - --hash=sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482 \ - --hash=sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006 \ - --hash=sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83 \ - --hash=sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6 \ - --hash=sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88 \ - --hash=sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86 \ - --hash=sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a \ - --hash=sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6 \ - --hash=sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a \ - --hash=sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6 \ - --hash=sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6 \ - --hash=sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43 \ 
- --hash=sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c \ - --hash=sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4 \ - --hash=sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e \ - --hash=sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203 \ - --hash=sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd \ - --hash=sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1 \ - --hash=sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24 \ - --hash=sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc \ - --hash=sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc \ - --hash=sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3 \ - --hash=sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598 \ - --hash=sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98 \ - --hash=sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331 \ - --hash=sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2 \ - --hash=sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a \ - --hash=sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6 \ - --hash=sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688 \ - --hash=sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91 \ - --hash=sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa \ - --hash=sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b \ - --hash=sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0 \ - --hash=sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840 \ - --hash=sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c \ - --hash=sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd \ - --hash=sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3 \ - --hash=sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 \ - --hash=sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1 \ - --hash=sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953 \ - --hash=sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250 \ - --hash=sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a \ - --hash=sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2 \ - --hash=sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20 \ - --hash=sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434 \ - --hash=sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab \ - --hash=sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703 \ - --hash=sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a \ - --hash=sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2 \ - --hash=sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac \ - --hash=sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611 \ - --hash=sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121 \ - --hash=sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e \ - --hash=sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b \ - 
--hash=sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09 \ - --hash=sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906 \ - --hash=sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9 \ - --hash=sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7 \ - --hash=sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b \ - --hash=sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987 \ - --hash=sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c \ - --hash=sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b \ - --hash=sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e \ - --hash=sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237 \ - --hash=sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1 \ - --hash=sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19 \ - --hash=sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b \ - --hash=sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad \ - --hash=sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0 \ - --hash=sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94 \ - --hash=sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312 \ - --hash=sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f \ - --hash=sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669 \ - --hash=sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1 \ - --hash=sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe \ - --hash=sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99 \ - --hash=sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a \ - --hash=sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a \ - --hash=sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52 \ - --hash=sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c \ - --hash=sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad \ - --hash=sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1 \ - --hash=sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a \ - --hash=sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f \ - --hash=sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a \ - --hash=sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27 +pydantic-core==2.23.4 \ + --hash=sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36 \ + --hash=sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05 \ + --hash=sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071 \ + --hash=sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327 \ + --hash=sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c \ + --hash=sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36 \ + --hash=sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29 \ + --hash=sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744 \ + --hash=sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d \ + --hash=sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec \ + 
--hash=sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e \ + --hash=sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e \ + --hash=sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577 \ + --hash=sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232 \ + --hash=sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863 \ + --hash=sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6 \ + --hash=sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368 \ + --hash=sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480 \ + --hash=sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2 \ + --hash=sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2 \ + --hash=sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6 \ + --hash=sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769 \ + --hash=sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d \ + --hash=sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2 \ + --hash=sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84 \ + --hash=sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166 \ + --hash=sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271 \ + --hash=sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5 \ + --hash=sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb \ + --hash=sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13 \ + --hash=sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323 \ + --hash=sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556 \ + --hash=sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665 \ + --hash=sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef \ + --hash=sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb \ + --hash=sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119 \ + --hash=sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126 \ + --hash=sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510 \ + --hash=sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b \ + --hash=sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87 \ + --hash=sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f \ + --hash=sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc \ + --hash=sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8 \ + --hash=sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21 \ + --hash=sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f \ + --hash=sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6 \ + --hash=sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658 \ + --hash=sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b \ + --hash=sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3 \ + --hash=sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb \ + --hash=sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59 \ + --hash=sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24 \ + 
--hash=sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9 \ + --hash=sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3 \ + --hash=sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd \ + --hash=sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753 \ + --hash=sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55 \ + --hash=sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad \ + --hash=sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a \ + --hash=sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605 \ + --hash=sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e \ + --hash=sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b \ + --hash=sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433 \ + --hash=sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8 \ + --hash=sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07 \ + --hash=sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728 \ + --hash=sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0 \ + --hash=sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327 \ + --hash=sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555 \ + --hash=sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64 \ + --hash=sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6 \ + --hash=sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea \ + --hash=sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b \ + --hash=sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df \ + --hash=sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e \ + --hash=sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd \ + --hash=sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068 \ + --hash=sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3 \ + --hash=sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040 \ + --hash=sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12 \ + --hash=sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916 \ + --hash=sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f \ + --hash=sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f \ + --hash=sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801 \ + --hash=sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231 \ + --hash=sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5 \ + --hash=sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8 \ + --hash=sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee \ + --hash=sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607 # via # -c requirements/main.txt # pydantic -pydantic-settings==2.4.0 \ - --hash=sha256:bb6849dc067f1687574c12a639e231f3a6feeed0a12d710c1382045c5db1c315 \ - --hash=sha256:ed81c3a0f46392b4d7c0a565c05884e6e54b3456e6f0fe4d8814981172dc9a88 +pydantic-settings==2.5.2 \ + --hash=sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907 \ + --hash=sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0 # via autodoc-pydantic pydata-sphinx-theme==0.12.0 \ 
--hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ @@ -768,9 +871,9 @@ pygments==2.18.0 \ pylatexenc==2.10 \ --hash=sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3 # via documenteer -pytest==8.3.2 \ - --hash=sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5 \ - --hash=sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce +pytest==8.3.3 \ + --hash=sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181 \ + --hash=sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2 # via # -r requirements/dev.in # pytest-cov @@ -793,6 +896,22 @@ python-dotenv==1.0.1 \ --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \ --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a # via pydantic-settings +pywin32==306 ; platform_python_implementation != 'PyPy' and sys_platform == 'win32' \ + --hash=sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d \ + --hash=sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65 \ + --hash=sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e \ + --hash=sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b \ + --hash=sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4 \ + --hash=sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040 \ + --hash=sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a \ + --hash=sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36 \ + --hash=sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8 \ + --hash=sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e \ + --hash=sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802 \ + --hash=sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a \ + --hash=sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407 \ + --hash=sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0 + # via jupyter-core pyyaml==6.0.2 \ --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ @@ -855,116 +974,116 @@ pyyaml==6.0.2 \ # myst-parser # pybtex # sphinxcontrib-redoc -pyzmq==26.1.0 \ - --hash=sha256:038ae4ffb63e3991f386e7fda85a9baab7d6617fe85b74a8f9cab190d73adb2b \ - --hash=sha256:05bacc4f94af468cc82808ae3293390278d5f3375bb20fef21e2034bb9a505b6 \ - --hash=sha256:0614aed6f87d550b5cecb03d795f4ddbb1544b78d02a4bd5eecf644ec98a39f6 \ - --hash=sha256:08f74904cb066e1178c1ec706dfdb5c6c680cd7a8ed9efebeac923d84c1f13b1 \ - --hash=sha256:093a1a3cae2496233f14b57f4b485da01b4ff764582c854c0f42c6dd2be37f3d \ - --hash=sha256:0a1f6ea5b1d6cdbb8cfa0536f0d470f12b4b41ad83625012e575f0e3ecfe97f0 \ - --hash=sha256:0e6cea102ffa16b737d11932c426f1dc14b5938cf7bc12e17269559c458ac334 \ - --hash=sha256:263cf1e36862310bf5becfbc488e18d5d698941858860c5a8c079d1511b3b18e \ - --hash=sha256:28a8b2abb76042f5fd7bd720f7fea48c0fd3e82e9de0a1bf2c0de3812ce44a42 \ - --hash=sha256:2ae7c57e22ad881af78075e0cea10a4c778e67234adc65c404391b417a4dda83 \ - --hash=sha256:2cd0f4d314f4a2518e8970b6f299ae18cff7c44d4a1fc06fc713f791c3a9e3ea \ - --hash=sha256:2fa76ebcebe555cce90f16246edc3ad83ab65bb7b3d4ce408cf6bc67740c4f88 \ - --hash=sha256:314d11564c00b77f6224d12eb3ddebe926c301e86b648a1835c5b28176c83eab \ - 
--hash=sha256:347e84fc88cc4cb646597f6d3a7ea0998f887ee8dc31c08587e9c3fd7b5ccef3 \ - --hash=sha256:359c533bedc62c56415a1f5fcfd8279bc93453afdb0803307375ecf81c962402 \ - --hash=sha256:393daac1bcf81b2a23e696b7b638eedc965e9e3d2112961a072b6cd8179ad2eb \ - --hash=sha256:3b3b8e36fd4c32c0825b4461372949ecd1585d326802b1321f8b6dc1d7e9318c \ - --hash=sha256:3c397b1b450f749a7e974d74c06d69bd22dd362142f370ef2bd32a684d6b480c \ - --hash=sha256:3d3146b1c3dcc8a1539e7cc094700b2be1e605a76f7c8f0979b6d3bde5ad4072 \ - --hash=sha256:3ee647d84b83509b7271457bb428cc347037f437ead4b0b6e43b5eba35fec0aa \ - --hash=sha256:416ac51cabd54f587995c2b05421324700b22e98d3d0aa2cfaec985524d16f1d \ - --hash=sha256:451e16ae8bea3d95649317b463c9f95cd9022641ec884e3d63fc67841ae86dfe \ - --hash=sha256:45cb1a70eb00405ce3893041099655265fabcd9c4e1e50c330026e82257892c1 \ - --hash=sha256:46d6800b45015f96b9d92ece229d92f2aef137d82906577d55fadeb9cf5fcb71 \ - --hash=sha256:471312a7375571857a089342beccc1a63584315188560c7c0da7e0a23afd8a5c \ - --hash=sha256:471880c4c14e5a056a96cd224f5e71211997d40b4bf5e9fdded55dafab1f98f2 \ - --hash=sha256:5384c527a9a004445c5074f1e20db83086c8ff1682a626676229aafd9cf9f7d1 \ - --hash=sha256:57bb2acba798dc3740e913ffadd56b1fcef96f111e66f09e2a8db3050f1f12c8 \ - --hash=sha256:58c33dc0e185dd97a9ac0288b3188d1be12b756eda67490e6ed6a75cf9491d79 \ - --hash=sha256:59d0acd2976e1064f1b398a00e2c3e77ed0a157529779e23087d4c2fb8aaa416 \ - --hash=sha256:5a6ed52f0b9bf8dcc64cc82cce0607a3dfed1dbb7e8c6f282adfccc7be9781de \ - --hash=sha256:5bc2431167adc50ba42ea3e5e5f5cd70d93e18ab7b2f95e724dd8e1bd2c38120 \ - --hash=sha256:5cca7b4adb86d7470e0fc96037771981d740f0b4cb99776d5cb59cd0e6684a73 \ - --hash=sha256:61dfa5ee9d7df297c859ac82b1226d8fefaf9c5113dc25c2c00ecad6feeeb04f \ - --hash=sha256:63c1d3a65acb2f9c92dce03c4e1758cc552f1ae5c78d79a44e3bb88d2fa71f3a \ - --hash=sha256:65c6e03cc0222eaf6aad57ff4ecc0a070451e23232bb48db4322cc45602cede0 \ - --hash=sha256:67976d12ebfd61a3bc7d77b71a9589b4d61d0422282596cf58c62c3866916544 \ - --hash=sha256:68a0a1d83d33d8367ddddb3e6bb4afbb0f92bd1dac2c72cd5e5ddc86bdafd3eb \ - --hash=sha256:6c5aeea71f018ebd3b9115c7cb13863dd850e98ca6b9258509de1246461a7e7f \ - --hash=sha256:754c99a9840839375ee251b38ac5964c0f369306eddb56804a073b6efdc0cd88 \ - --hash=sha256:75a95c2358fcfdef3374cb8baf57f1064d73246d55e41683aaffb6cfe6862917 \ - --hash=sha256:7688653574392d2eaeef75ddcd0b2de5b232d8730af29af56c5adf1df9ef8d6f \ - --hash=sha256:77ce6a332c7e362cb59b63f5edf730e83590d0ab4e59c2aa5bd79419a42e3449 \ - --hash=sha256:7907419d150b19962138ecec81a17d4892ea440c184949dc29b358bc730caf69 \ - --hash=sha256:79e45a4096ec8388cdeb04a9fa5e9371583bcb826964d55b8b66cbffe7b33c86 \ - --hash=sha256:7bcbfbab4e1895d58ab7da1b5ce9a327764f0366911ba5b95406c9104bceacb0 \ - --hash=sha256:80b0c9942430d731c786545da6be96d824a41a51742e3e374fedd9018ea43106 \ - --hash=sha256:8b88641384e84a258b740801cd4dbc45c75f148ee674bec3149999adda4a8598 \ - --hash=sha256:8d4dac7d97f15c653a5fedcafa82626bd6cee1450ccdaf84ffed7ea14f2b07a4 \ - --hash=sha256:8d906d43e1592be4b25a587b7d96527cb67277542a5611e8ea9e996182fae410 \ - --hash=sha256:8efb782f5a6c450589dbab4cb0f66f3a9026286333fe8f3a084399149af52f29 \ - --hash=sha256:906e532c814e1d579138177a00ae835cd6becbf104d45ed9093a3aaf658f6a6a \ - --hash=sha256:90d4feb2e83dfe9ace6374a847e98ee9d1246ebadcc0cb765482e272c34e5820 \ - --hash=sha256:911c43a4117915203c4cc8755e0f888e16c4676a82f61caee2f21b0c00e5b894 \ - --hash=sha256:91d1a20bdaf3b25f3173ff44e54b1cfbc05f94c9e8133314eb2962a89e05d6e3 \ - 
--hash=sha256:94c4262626424683feea0f3c34951d39d49d354722db2745c42aa6bb50ecd93b \ - --hash=sha256:96d7c1d35ee4a495df56c50c83df7af1c9688cce2e9e0edffdbf50889c167595 \ - --hash=sha256:9869fa984c8670c8ab899a719eb7b516860a29bc26300a84d24d8c1b71eae3ec \ - --hash=sha256:98c03bd7f3339ff47de7ea9ac94a2b34580a8d4df69b50128bb6669e1191a895 \ - --hash=sha256:995301f6740a421afc863a713fe62c0aaf564708d4aa057dfdf0f0f56525294b \ - --hash=sha256:998444debc8816b5d8d15f966e42751032d0f4c55300c48cc337f2b3e4f17d03 \ - --hash=sha256:9a6847c92d9851b59b9f33f968c68e9e441f9a0f8fc972c5580c5cd7cbc6ee24 \ - --hash=sha256:9bdfcb74b469b592972ed881bad57d22e2c0acc89f5e8c146782d0d90fb9f4bf \ - --hash=sha256:9f136a6e964830230912f75b5a116a21fe8e34128dcfd82285aa0ef07cb2c7bd \ - --hash=sha256:a0f0ab9df66eb34d58205913f4540e2ad17a175b05d81b0b7197bc57d000e829 \ - --hash=sha256:a4b7a989c8f5a72ab1b2bbfa58105578753ae77b71ba33e7383a31ff75a504c4 \ - --hash=sha256:a7b8aab50e5a288c9724d260feae25eda69582be84e97c012c80e1a5e7e03fb2 \ - --hash=sha256:ad875277844cfaeca7fe299ddf8c8d8bfe271c3dc1caf14d454faa5cdbf2fa7a \ - --hash=sha256:add52c78a12196bc0fda2de087ba6c876ea677cbda2e3eba63546b26e8bf177b \ - --hash=sha256:b10163e586cc609f5f85c9b233195554d77b1e9a0801388907441aaeb22841c5 \ - --hash=sha256:b24079a14c9596846bf7516fe75d1e2188d4a528364494859106a33d8b48be38 \ - --hash=sha256:b281b5ff5fcc9dcbfe941ac5c7fcd4b6c065adad12d850f95c9d6f23c2652384 \ - --hash=sha256:b3bb34bebaa1b78e562931a1687ff663d298013f78f972a534f36c523311a84d \ - --hash=sha256:b45e6445ac95ecb7d728604bae6538f40ccf4449b132b5428c09918523abc96d \ - --hash=sha256:ba0a31d00e8616149a5ab440d058ec2da621e05d744914774c4dde6837e1f545 \ - --hash=sha256:baba2fd199b098c5544ef2536b2499d2e2155392973ad32687024bd8572a7d1c \ - --hash=sha256:bd13f0231f4788db619347b971ca5f319c5b7ebee151afc7c14632068c6261d3 \ - --hash=sha256:bd3f6329340cef1c7ba9611bd038f2d523cea79f09f9c8f6b0553caba59ec562 \ - --hash=sha256:bdeb2c61611293f64ac1073f4bf6723b67d291905308a7de9bb2ca87464e3273 \ - --hash=sha256:bef24d3e4ae2c985034439f449e3f9e06bf579974ce0e53d8a507a1577d5b2ab \ - --hash=sha256:c0665d85535192098420428c779361b8823d3d7ec4848c6af3abb93bc5c915bf \ - --hash=sha256:c5668dac86a869349828db5fc928ee3f58d450dce2c85607067d581f745e4fb1 \ - --hash=sha256:c9b9305004d7e4e6a824f4f19b6d8f32b3578aad6f19fc1122aaf320cbe3dc83 \ - --hash=sha256:ccb42ca0a4a46232d716779421bbebbcad23c08d37c980f02cc3a6bd115ad277 \ - --hash=sha256:ce6f2b66799971cbae5d6547acefa7231458289e0ad481d0be0740535da38d8b \ - --hash=sha256:d36b8fffe8b248a1b961c86fbdfa0129dfce878731d169ede7fa2631447331be \ - --hash=sha256:d3dd5523ed258ad58fed7e364c92a9360d1af8a9371e0822bd0146bdf017ef4c \ - --hash=sha256:d416f2088ac8f12daacffbc2e8918ef4d6be8568e9d7155c83b7cebed49d2322 \ - --hash=sha256:d4fafc2eb5d83f4647331267808c7e0c5722c25a729a614dc2b90479cafa78bd \ - --hash=sha256:d5c8b17f6e8f29138678834cf8518049e740385eb2dbf736e8f07fc6587ec682 \ - --hash=sha256:d9270fbf038bf34ffca4855bcda6e082e2c7f906b9eb8d9a8ce82691166060f7 \ - --hash=sha256:dcc37d9d708784726fafc9c5e1232de655a009dbf97946f117aefa38d5985a0f \ - --hash=sha256:ddbb2b386128d8eca92bd9ca74e80f73fe263bcca7aa419f5b4cbc1661e19741 \ - --hash=sha256:e1e5d0a25aea8b691a00d6b54b28ac514c8cc0d8646d05f7ca6cb64b97358250 \ - --hash=sha256:e5c88b2f13bcf55fee78ea83567b9fe079ba1a4bef8b35c376043440040f7edb \ - --hash=sha256:e7eca8b89e56fb8c6c26dd3e09bd41b24789022acf1cf13358e96f1cafd8cae3 \ - --hash=sha256:e8746ce968be22a8a1801bf4a23e565f9687088580c3ed07af5846580dd97f76 \ - 
--hash=sha256:ec7248673ffc7104b54e4957cee38b2f3075a13442348c8d651777bf41aa45ee \ - --hash=sha256:ecb6c88d7946166d783a635efc89f9a1ff11c33d680a20df9657b6902a1d133b \ - --hash=sha256:ef3b048822dca6d231d8a8ba21069844ae38f5d83889b9b690bf17d2acc7d099 \ - --hash=sha256:f133d05aaf623519f45e16ab77526e1e70d4e1308e084c2fb4cedb1a0c764bbb \ - --hash=sha256:f3292d384537b9918010769b82ab3e79fca8b23d74f56fc69a679106a3e2c2cf \ - --hash=sha256:f774841bb0e8588505002962c02da420bcfb4c5056e87a139c6e45e745c0e2e2 \ - --hash=sha256:f9499c70c19ff0fbe1007043acb5ad15c1dec7d8e84ab429bca8c87138e8f85c \ - --hash=sha256:f99de52b8fbdb2a8f5301ae5fc0f9e6b3ba30d1d5fc0421956967edcc6914242 \ - --hash=sha256:fa25a620eed2a419acc2cf10135b995f8f0ce78ad00534d729aa761e4adcef8a \ - --hash=sha256:fbf558551cf415586e91160d69ca6416f3fce0b86175b64e4293644a7416b81b \ - --hash=sha256:fc82269d24860cfa859b676d18850cbb8e312dcd7eada09e7d5b007e2f3d9eb1 \ - --hash=sha256:ff832cce719edd11266ca32bc74a626b814fff236824aa1aeaad399b69fe6eae +pyzmq==26.2.0 \ + --hash=sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6 \ + --hash=sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a \ + --hash=sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9 \ + --hash=sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f \ + --hash=sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37 \ + --hash=sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc \ + --hash=sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed \ + --hash=sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097 \ + --hash=sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d \ + --hash=sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52 \ + --hash=sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6 \ + --hash=sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6 \ + --hash=sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2 \ + --hash=sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282 \ + --hash=sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3 \ + --hash=sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732 \ + --hash=sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5 \ + --hash=sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18 \ + --hash=sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306 \ + --hash=sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f \ + --hash=sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3 \ + --hash=sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b \ + --hash=sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277 \ + --hash=sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a \ + --hash=sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797 \ + --hash=sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca \ + --hash=sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c \ + --hash=sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f \ + --hash=sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5 \ + --hash=sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a \ + 
--hash=sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44 \ + --hash=sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20 \ + --hash=sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4 \ + --hash=sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8 \ + --hash=sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780 \ + --hash=sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386 \ + --hash=sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5 \ + --hash=sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2 \ + --hash=sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0 \ + --hash=sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971 \ + --hash=sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b \ + --hash=sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50 \ + --hash=sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c \ + --hash=sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f \ + --hash=sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231 \ + --hash=sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c \ + --hash=sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08 \ + --hash=sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5 \ + --hash=sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6 \ + --hash=sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073 \ + --hash=sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e \ + --hash=sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4 \ + --hash=sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317 \ + --hash=sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3 \ + --hash=sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072 \ + --hash=sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad \ + --hash=sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a \ + --hash=sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb \ + --hash=sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd \ + --hash=sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f \ + --hash=sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef \ + --hash=sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5 \ + --hash=sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187 \ + --hash=sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711 \ + --hash=sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988 \ + --hash=sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640 \ + --hash=sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c \ + --hash=sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764 \ + --hash=sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1 \ + --hash=sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1 \ + --hash=sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289 \ + --hash=sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb \ + 
--hash=sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a \ + --hash=sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218 \ + --hash=sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c \ + --hash=sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf \ + --hash=sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7 \ + --hash=sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8 \ + --hash=sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726 \ + --hash=sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9 \ + --hash=sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93 \ + --hash=sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88 \ + --hash=sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115 \ + --hash=sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6 \ + --hash=sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672 \ + --hash=sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2 \ + --hash=sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea \ + --hash=sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc \ + --hash=sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b \ + --hash=sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa \ + --hash=sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003 \ + --hash=sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797 \ + --hash=sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940 \ + --hash=sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db \ + --hash=sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc \ + --hash=sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27 \ + --hash=sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3 \ + --hash=sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e \ + --hash=sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98 \ + --hash=sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b \ + --hash=sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629 \ + --hash=sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9 \ + --hash=sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6 \ + --hash=sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec \ + --hash=sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951 \ + --hash=sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae \ + --hash=sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4 \ + --hash=sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6 \ + --hash=sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919 # via # ipykernel # jupyter-client @@ -1089,10 +1208,12 @@ rpds-py==0.20.0 \ # via # jsonschema # referencing -setuptools==72.2.0 \ - --hash=sha256:80aacbf633704e9c8bfa1d99fa5dd4dc59573efcf9e4042c13d3bcef91ac2ef9 \ - --hash=sha256:f11dd94b7bae3a156a95ec151f24e4637fb4fa19c878e4d191bfb8b2d82728c4 - # via documenteer +setuptools==75.1.0 \ + --hash=sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2 \ + 
--hash=sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538 + # via + # documenteer + # sphinxcontrib-bibtex six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -1138,13 +1259,13 @@ sphinx==8.0.2 \ # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.2.3 \ - --hash=sha256:b7058e8c5831e5598afca1a78fda0695d3291388d954464a6e480c36198680c0 \ - --hash=sha256:fde3d888949bd0a91207cf1e54afda58121dbb4bf1f183d0cc78a0826654c974 +sphinx-autodoc-typehints==2.4.4 \ + --hash=sha256:940de2951fd584d147e46772579fdc904f945c5f1ee1a78c614646abfbbef18b \ + --hash=sha256:e743512da58b67a06579a1462798a6907664ab77460758a43234adeac350afbf # via documenteer -sphinx-automodapi==0.17.0 \ - --hash=sha256:4d029cb79eef29413e94ab01bb0177ebd2d5ba86e9789b73575afe9c06ae1501 \ - --hash=sha256:7ccdadad57add4aa9149d9f2bb5cf28c8f8b590280b4735b1156ea8355c423a1 +sphinx-automodapi==0.18.0 \ + --hash=sha256:022860385590768f52d4f6e19abb83b2574772d2721fb4050ecdb6e593a1a440 \ + --hash=sha256:7bf9d9a2cb67a5389c51071cfd86674ca3892ca5d5943f95de4553d6f35dddae # via documenteer sphinx-click==6.0.0 \ --hash=sha256:1e0a3c83bcb7c55497751b19d07ebe56b5d7b85eb76dd399cf9061b497adc317 \ @@ -1176,9 +1297,9 @@ sphinxcontrib-applehelp==2.0.0 \ --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 # via sphinx -sphinxcontrib-bibtex==2.6.2 \ - --hash=sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da \ - --hash=sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae +sphinxcontrib-bibtex==2.6.3 \ + --hash=sha256:7c790347ef1cb0edf30de55fc324d9782d085e89c52c2b8faafa082e08e23946 \ + --hash=sha256:ff016b738fcc867df0f75c29e139b3b2158d26a2c802db27963cb128be3b75fb # via documenteer sphinxcontrib-devhelp==2.0.0 \ --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ @@ -1223,56 +1344,56 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer -sqlalchemy==2.0.32 \ - --hash=sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da \ - --hash=sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5 \ - --hash=sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619 \ - --hash=sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78 \ - --hash=sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f \ - --hash=sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389 \ - --hash=sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6 \ - --hash=sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533 \ - --hash=sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9 \ - --hash=sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f \ - --hash=sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d \ - --hash=sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0 \ - --hash=sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c \ - --hash=sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d \ - 
--hash=sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d \ - --hash=sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632 \ - --hash=sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1 \ - --hash=sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8 \ - --hash=sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5 \ - --hash=sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb \ - --hash=sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525 \ - --hash=sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2 \ - --hash=sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c \ - --hash=sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1 \ - --hash=sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b \ - --hash=sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da \ - --hash=sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92 \ - --hash=sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a \ - --hash=sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d \ - --hash=sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16 \ - --hash=sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec \ - --hash=sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84 \ - --hash=sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3 \ - --hash=sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd \ - --hash=sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924 \ - --hash=sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb \ - --hash=sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28 \ - --hash=sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22 \ - --hash=sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4 \ - --hash=sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961 \ - --hash=sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be \ - --hash=sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5 \ - --hash=sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0 \ - --hash=sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e \ - --hash=sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8 \ - --hash=sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8 \ - --hash=sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65 \ - --hash=sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad \ - --hash=sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202 +sqlalchemy==2.0.35 \ + --hash=sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9 \ + --hash=sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00 \ + --hash=sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee \ + --hash=sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6 \ + --hash=sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1 \ + --hash=sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72 \ + --hash=sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf \ + 
--hash=sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8 \ + --hash=sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b \ + --hash=sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc \ + --hash=sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c \ + --hash=sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1 \ + --hash=sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3 \ + --hash=sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5 \ + --hash=sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90 \ + --hash=sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec \ + --hash=sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71 \ + --hash=sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7 \ + --hash=sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b \ + --hash=sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468 \ + --hash=sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3 \ + --hash=sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e \ + --hash=sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139 \ + --hash=sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff \ + --hash=sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11 \ + --hash=sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01 \ + --hash=sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62 \ + --hash=sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d \ + --hash=sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a \ + --hash=sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db \ + --hash=sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87 \ + --hash=sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e \ + --hash=sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1 \ + --hash=sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9 \ + --hash=sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f \ + --hash=sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0 \ + --hash=sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44 \ + --hash=sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936 \ + --hash=sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8 \ + --hash=sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea \ + --hash=sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f \ + --hash=sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4 \ + --hash=sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0 \ + --hash=sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c \ + --hash=sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f \ + --hash=sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60 \ + --hash=sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2 \ + --hash=sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9 \ + --hash=sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33 # via jupyter-cache stack-data==0.6.3 \ 
--hash=sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9 \ @@ -1286,9 +1407,9 @@ termcolor==2.4.0 \ --hash=sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63 \ --hash=sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a # via pytest-sugar -tomlkit==0.13.0 \ - --hash=sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72 \ - --hash=sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264 +tomlkit==0.13.2 \ + --hash=sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde \ + --hash=sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79 # via documenteer tornado==6.4.1 \ --hash=sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8 \ @@ -1360,9 +1481,9 @@ typed-ast==1.5.5 \ --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \ --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2 # via diagrams -types-pyyaml==6.0.12.20240808 \ - --hash=sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af \ - --hash=sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35 +types-pyyaml==6.0.12.20240917 \ + --hash=sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570 \ + --hash=sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587 # via -r requirements/dev.in typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ @@ -1378,9 +1499,9 @@ uc-micro-py==1.0.3 \ --hash=sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a \ --hash=sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5 # via linkify-it-py -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 +urllib3==2.2.3 \ + --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ + --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 # via # -c requirements/main.txt # documenteer @@ -1390,7 +1511,7 @@ wcwidth==0.2.13 \ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 # via prompt-toolkit -zipp==3.20.0 \ - --hash=sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31 \ - --hash=sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d +zipp==3.20.2 \ + --hash=sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350 \ + --hash=sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29 # via importlib-metadata diff --git a/requirements/main.in b/requirements/main.in deleted file mode 100644 index 8847952ab1..0000000000 --- a/requirements/main.in +++ /dev/null @@ -1,22 +0,0 @@ -# Editable runtime dependencies (equivalent to project.dependencies). -# Add direct runtime dependencies here, as well as implicit dependencies -# with constrained versions. These should be sufficient to run the phalanx -# command-line tool. -# -# After editing, update requirements/main.txt by running: -# make update-deps - -bcrypt -click -cryptography -GitPython -hvac -jinja2 -onepasswordconnectsdk -pydantic>2 -PyYAML -safir>5 - -# Uncomment this, change the branch, comment out safir above, and run make -# update-deps-no-hashes to test against an unreleased version of Safir. 
-# safir @ git+https://github.com/lsst-sqre/safir@main diff --git a/requirements/main.txt b/requirements/main.txt index e4b5e19f13..10ad6927b2 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,12 +1,12 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/main.txt requirements/main.in +# uv pip compile --universal --generate-hashes --output-file requirements/main.txt pyproject.toml annotated-types==0.7.0 \ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 # via pydantic -anyio==4.4.0 \ - --hash=sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94 \ - --hash=sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7 +anyio==4.6.0 \ + --hash=sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb \ + --hash=sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a # via # httpcore # starlette @@ -38,82 +38,82 @@ bcrypt==4.2.0 \ --hash=sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8 \ --hash=sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221 \ --hash=sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db - # via -r requirements/main.in -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 + # via phalanx (pyproject.toml) +certifi==2024.8.30 \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 # via # httpcore # httpx # requests -cffi==1.17.0 \ - --hash=sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f \ - --hash=sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab \ - --hash=sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499 \ - --hash=sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058 \ - --hash=sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693 \ - --hash=sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb \ - --hash=sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377 \ - --hash=sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885 \ - --hash=sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2 \ - --hash=sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401 \ - --hash=sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4 \ - --hash=sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b \ - --hash=sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59 \ - --hash=sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f \ - --hash=sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c \ - --hash=sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555 \ - --hash=sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa \ - --hash=sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424 \ - --hash=sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb \ - --hash=sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2 \ - 
--hash=sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8 \ - --hash=sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e \ - --hash=sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9 \ - --hash=sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82 \ - --hash=sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828 \ - --hash=sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759 \ - --hash=sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc \ - --hash=sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118 \ - --hash=sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf \ - --hash=sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932 \ - --hash=sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a \ - --hash=sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29 \ - --hash=sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206 \ - --hash=sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2 \ - --hash=sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c \ - --hash=sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c \ - --hash=sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0 \ - --hash=sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a \ - --hash=sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195 \ - --hash=sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6 \ - --hash=sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9 \ - --hash=sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc \ - --hash=sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb \ - --hash=sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0 \ - --hash=sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7 \ - --hash=sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb \ - --hash=sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a \ - --hash=sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492 \ - --hash=sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720 \ - --hash=sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42 \ - --hash=sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7 \ - --hash=sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d \ - --hash=sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d \ - --hash=sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb \ - --hash=sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4 \ - --hash=sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2 \ - --hash=sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b \ - --hash=sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8 \ - --hash=sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e \ - --hash=sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204 \ - --hash=sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3 \ - --hash=sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150 \ - 
--hash=sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4 \ - --hash=sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76 \ - --hash=sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e \ - --hash=sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb \ - --hash=sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91 +cffi==1.17.1 ; platform_python_implementation != 'PyPy' \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + 
--hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b # via cryptography charset-normalizer==3.3.2 \ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ @@ -211,43 +211,47 @@ click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de # via - # -r requirements/main.in + # phalanx (pyproject.toml) # safir -cryptography==43.0.0 \ - --hash=sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709 \ - --hash=sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069 \ - --hash=sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2 \ - --hash=sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b \ - --hash=sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e \ - --hash=sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70 \ - --hash=sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778 \ - 
--hash=sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22 \ - --hash=sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895 \ - --hash=sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf \ - --hash=sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431 \ - --hash=sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f \ - --hash=sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947 \ - --hash=sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74 \ - --hash=sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc \ - --hash=sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66 \ - --hash=sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66 \ - --hash=sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf \ - --hash=sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f \ - --hash=sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5 \ - --hash=sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e \ - --hash=sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f \ - --hash=sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55 \ - --hash=sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1 \ - --hash=sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47 \ - --hash=sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5 \ - --hash=sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0 +colorama==0.4.6 ; platform_system == 'Windows' \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via click +cryptography==43.0.1 \ + --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ + --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ + --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ + --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ + --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ + --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ + --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ + --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ + --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ + --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ + --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ + --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ + --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ + --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ + --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ + --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ + --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ + --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ + --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ + 
--hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ + --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ + --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ + --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ + --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ + --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ + --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ + --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 # via - # -r requirements/main.in + # phalanx (pyproject.toml) # pyjwt # safir -fastapi==0.112.0 \ - --hash=sha256:3487ded9778006a45834b8c816ec4a48d522e2631ca9e75ec5a774f1b052f821 \ - --hash=sha256:d262bc56b7d101d1f4e8fc0ad2ac75bb9935fec504d2b7117686cec50710cf05 +fastapi==0.115.0 \ + --hash=sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631 \ + --hash=sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -260,7 +264,7 @@ gitdb==4.0.11 \ gitpython==3.1.43 \ --hash=sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c \ --hash=sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff - # via -r requirements/main.in + # via phalanx (pyproject.toml) h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 @@ -278,10 +282,10 @@ httpx==0.23.3 \ hvac==2.3.0 \ --hash=sha256:1b85e3320e8642dd82f234db63253cda169a817589e823713dc5fca83119b1e2 \ --hash=sha256:a3afc5710760b6ee9b3571769df87a0333da45da05a5f9f963e1d3925a84be7d - # via -r requirements/main.in -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 + # via phalanx (pyproject.toml) +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via # anyio # requests @@ -289,7 +293,7 @@ idna==3.7 \ jinja2==3.1.4 \ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d - # via -r requirements/main.in + # via phalanx (pyproject.toml) markupsafe==2.1.5 \ --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ @@ -355,108 +359,108 @@ markupsafe==2.1.5 \ onepasswordconnectsdk==1.5.1 \ --hash=sha256:8924c614ffed98f29faada03dba940dc0bc47851b1f5f4ef7e312e43c10ec25b \ --hash=sha256:f8e033dbb5dcc5ff08fbdbbfe329655adce6ec44cfe54652474d7e31175de48e - # via -r requirements/main.in -pycparser==2.22 \ + # via phalanx (pyproject.toml) +pycparser==2.22 ; platform_python_implementation != 'PyPy' \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc # via cffi -pydantic==2.8.2 \ - --hash=sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a \ - --hash=sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8 +pydantic==2.9.2 \ + 
--hash=sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f \ + --hash=sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12 # via - # -r requirements/main.in + # phalanx (pyproject.toml) # fastapi # safir -pydantic-core==2.20.1 \ - --hash=sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d \ - --hash=sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f \ - --hash=sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686 \ - --hash=sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482 \ - --hash=sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006 \ - --hash=sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83 \ - --hash=sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6 \ - --hash=sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88 \ - --hash=sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86 \ - --hash=sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a \ - --hash=sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6 \ - --hash=sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a \ - --hash=sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6 \ - --hash=sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6 \ - --hash=sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43 \ - --hash=sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c \ - --hash=sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4 \ - --hash=sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e \ - --hash=sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203 \ - --hash=sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd \ - --hash=sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1 \ - --hash=sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24 \ - --hash=sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc \ - --hash=sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc \ - --hash=sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3 \ - --hash=sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598 \ - --hash=sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98 \ - --hash=sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331 \ - --hash=sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2 \ - --hash=sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a \ - --hash=sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6 \ - --hash=sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688 \ - --hash=sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91 \ - --hash=sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa \ - --hash=sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b \ - --hash=sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0 \ - --hash=sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840 \ - --hash=sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c \ - --hash=sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd \ - 
--hash=sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3 \ - --hash=sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 \ - --hash=sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1 \ - --hash=sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953 \ - --hash=sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250 \ - --hash=sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a \ - --hash=sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2 \ - --hash=sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20 \ - --hash=sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434 \ - --hash=sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab \ - --hash=sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703 \ - --hash=sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a \ - --hash=sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2 \ - --hash=sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac \ - --hash=sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611 \ - --hash=sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121 \ - --hash=sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e \ - --hash=sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b \ - --hash=sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09 \ - --hash=sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906 \ - --hash=sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9 \ - --hash=sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7 \ - --hash=sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b \ - --hash=sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987 \ - --hash=sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c \ - --hash=sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b \ - --hash=sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e \ - --hash=sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237 \ - --hash=sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1 \ - --hash=sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19 \ - --hash=sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b \ - --hash=sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad \ - --hash=sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0 \ - --hash=sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94 \ - --hash=sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312 \ - --hash=sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f \ - --hash=sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669 \ - --hash=sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1 \ - --hash=sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe \ - --hash=sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99 \ - --hash=sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a \ - --hash=sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a \ - 
--hash=sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52 \ - --hash=sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c \ - --hash=sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad \ - --hash=sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1 \ - --hash=sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a \ - --hash=sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f \ - --hash=sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a \ - --hash=sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27 +pydantic-core==2.23.4 \ + --hash=sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36 \ + --hash=sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05 \ + --hash=sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071 \ + --hash=sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327 \ + --hash=sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c \ + --hash=sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36 \ + --hash=sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29 \ + --hash=sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744 \ + --hash=sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d \ + --hash=sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec \ + --hash=sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e \ + --hash=sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e \ + --hash=sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577 \ + --hash=sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232 \ + --hash=sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863 \ + --hash=sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6 \ + --hash=sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368 \ + --hash=sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480 \ + --hash=sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2 \ + --hash=sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2 \ + --hash=sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6 \ + --hash=sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769 \ + --hash=sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d \ + --hash=sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2 \ + --hash=sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84 \ + --hash=sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166 \ + --hash=sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271 \ + --hash=sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5 \ + --hash=sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb \ + --hash=sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13 \ + --hash=sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323 \ + --hash=sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556 \ + --hash=sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665 \ + --hash=sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef \ + 
--hash=sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb \ + --hash=sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119 \ + --hash=sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126 \ + --hash=sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510 \ + --hash=sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b \ + --hash=sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87 \ + --hash=sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f \ + --hash=sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc \ + --hash=sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8 \ + --hash=sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21 \ + --hash=sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f \ + --hash=sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6 \ + --hash=sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658 \ + --hash=sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b \ + --hash=sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3 \ + --hash=sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb \ + --hash=sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59 \ + --hash=sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24 \ + --hash=sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9 \ + --hash=sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3 \ + --hash=sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd \ + --hash=sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753 \ + --hash=sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55 \ + --hash=sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad \ + --hash=sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a \ + --hash=sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605 \ + --hash=sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e \ + --hash=sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b \ + --hash=sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433 \ + --hash=sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8 \ + --hash=sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07 \ + --hash=sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728 \ + --hash=sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0 \ + --hash=sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327 \ + --hash=sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555 \ + --hash=sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64 \ + --hash=sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6 \ + --hash=sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea \ + --hash=sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b \ + --hash=sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df \ + --hash=sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e \ + --hash=sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd \ + 
--hash=sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068 \ + --hash=sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3 \ + --hash=sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040 \ + --hash=sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12 \ + --hash=sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916 \ + --hash=sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f \ + --hash=sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f \ + --hash=sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801 \ + --hash=sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231 \ + --hash=sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5 \ + --hash=sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8 \ + --hash=sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee \ + --hash=sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607 # via # pydantic # safir @@ -522,7 +526,7 @@ pyyaml==6.0.2 \ --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 - # via -r requirements/main.in + # via phalanx (pyproject.toml) requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 @@ -531,10 +535,14 @@ rfc3986==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 # via httpx -safir==6.2.0 \ - --hash=sha256:335219abba8ed663395bcf6cf86a60ec8de8412ea212dc0dbe8425e9faa7bc97 \ - --hash=sha256:61cf6fd3839c0945bcc7c01469dc8fcd19351eba33b6022c596684d87763e50e - # via -r requirements/main.in +safir==6.4.0 \ + --hash=sha256:ba7af071eab0d198e6e15a2117028566f3f4237e02e2278e8bfc2633a7c68228 \ + --hash=sha256:f38c3f1d7d76d304984b572288826510e5c7a0e1f965b2eabdd7f3bace07c48a + # via phalanx (pyproject.toml) +safir-logging==6.4.0 \ + --hash=sha256:4031a430d738b8fe5bfd29125dce6cbf4e4949879307ba4146648afa3d24cd0a \ + --hash=sha256:e2dbf0b5d9dabecd70c27bff9bf01629bf0724b05b0f0087a1fe4f45c702215f + # via safir six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -550,16 +558,18 @@ sniffio==1.3.1 \ # anyio # httpcore # httpx -starlette==0.37.2 \ - --hash=sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee \ - --hash=sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823 +starlette==0.38.6 \ + --hash=sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05 \ + --hash=sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead # via # fastapi # safir structlog==24.4.0 \ --hash=sha256:597f61e80a91cc0749a9fd2a098ed76715a1c8a01f73e336b746504d1aad7610 \ --hash=sha256:b27bfecede327a6d2da5fbc96bd859f114ecc398a6389d664f62085ee7ae6fc4 - # via safir + # via + # safir + # safir-logging typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 @@ -571,7 +581,7 @@ uritemplate==4.1.1 \ 
--hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e # via gidgethub -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 +urllib3==2.2.3 \ + --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ + --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 # via requests diff --git a/requirements/tox.txt b/requirements/tox.txt index 2c83eef26d..f50f47aa68 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -1,8 +1,8 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes --output-file requirements/tox.txt requirements/tox.in -cachetools==5.4.0 \ - --hash=sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474 \ - --hash=sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827 +# uv pip compile --universal --generate-hashes --output-file requirements/tox.txt requirements/tox.in +cachetools==5.5.0 \ + --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ + --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a # via tox chardet==5.2.0 \ --hash=sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7 \ @@ -11,14 +11,17 @@ chardet==5.2.0 \ colorama==0.4.6 \ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 - # via tox + # via + # -c requirements/dev.txt + # -c requirements/main.txt + # tox distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.16.1 \ + --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ + --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 # via # tox # virtualenv @@ -30,9 +33,9 @@ packaging==24.1 \ # pyproject-api # tox # tox-uv -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via # -c requirements/dev.txt # tox @@ -43,41 +46,41 @@ pluggy==1.5.0 \ # via # -c requirements/dev.txt # tox -pyproject-api==1.7.1 \ - --hash=sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb \ - --hash=sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827 +pyproject-api==1.8.0 \ + --hash=sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228 \ + --hash=sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496 # via tox -tox==4.18.0 \ - --hash=sha256:0a457400cf70615dc0627eb70d293e80cd95d8ce174bb40ac011011f0c03a249 \ - --hash=sha256:5dfa1cab9f146becd6e351333a82f9e0ade374451630ba65ee54584624c27b58 +tox==4.20.0 \ + --hash=sha256:21a8005e3d3fe5658a8e36b8ca3ed13a4230429063c5cc2a2fdac6ee5aa0de34 
\ + --hash=sha256:5b78a49b6eaaeab3ae4186415e7c97d524f762ae967c63562687c3e5f0ec23d5 # via # -r requirements/tox.in # tox-uv -tox-uv==1.11.2 \ - --hash=sha256:7f8f1737b3277e1cddcb5b89fcc5931d04923562c940ae60f29e140908566df2 \ - --hash=sha256:a7aded5c3fb69f055b523357988c1055bb573e91bfd7ecfb9b5233ebcab5d10b +tox-uv==1.13.0 \ + --hash=sha256:1037e4abad15a3b708b5970ed7a17a0765d7249b641a92b155bc3343b8b0145b \ + --hash=sha256:fb087b8b4ff779c72b48fc72ea1995387bb1c0dfb37910c20e46cef8b5f98c15 # via -r requirements/tox.in -uv==0.2.36 \ - --hash=sha256:083e56a18fc33395aeed4f56a47003e08f2ad9d5039af63ad5b107a241d0e7a3 \ - --hash=sha256:139889680c2475afbab61c725df951c4dfa030c42c4eaa8f27d05286c96e8aab \ - --hash=sha256:3f18322decfb0da577e40675620f6e6b9ffe1d8ee1de88a448bfe67fe7884626 \ - --hash=sha256:463a45a34277b9334e500fce463f59408a6bab0c1b5cb3023f25185a4805a562 \ - --hash=sha256:5a3800d2484b989041139ef96b395cec0e4e0a13132584b0147c739063a2494f \ - --hash=sha256:5c8d624975f8355e00ad5f802ed27fcfc7b86d0bd50b57efe24bd665fd3f9a9b \ - --hash=sha256:8753851cc10b0a67e5c5dd29a6f35a072341290cf27a7bb3193ddd92bda19f51 \ - --hash=sha256:8820dd5b77ffcda07dde09712a43d969d39b0aace112d8074c540f19a4911cc2 \ - --hash=sha256:89d3fb3d7a66fa4a4f7c938be0277457fe71179ec4e72758cfe16faec1daa362 \ - --hash=sha256:8e5e2e8e218eb672a3bb57af0ab2d2d3de79119b5dc6b6edb03d349739e474ff \ - --hash=sha256:a08d485cd8eae0352b4765005a4499ad5db073c3534866d68617bbb831ee219a \ - --hash=sha256:a4fddaf0a6a995591042a57ac48557b9b2c1e2c7f09e0f7880f40c34e61f53f8 \ - --hash=sha256:a7961f4d88100fc48129c918545cbb17b9a0d8e3d163c65985e1d1682e056321 \ - --hash=sha256:a837b799e3af1535473b8ab14e414e50f595d547d295879db0d6b0943b7476df \ - --hash=sha256:d093fd10aaf29e85128beaa514f8d37d7374cf1d1a95da966e15788a6fe7d55d \ - --hash=sha256:e36cd4e9c1187d155139b98bcd2cfbfb275f9f601c550fcc38a283983c74f93d \ - --hash=sha256:e79a4cdb3b89b011fafcaa853ebbb9254115f3f7cadbd9141492c48ceeac1b2d \ - --hash=sha256:f1d711629dd8610933687ceea4ad82156ef7b2102c4e9da72afe6c01981f8a1a +uv==0.4.15 \ + --hash=sha256:04858bfd551fabe1635127d9a0afe5c62e1e7d56cf309a9674840c90bfc1f21e \ + --hash=sha256:0e9b78f1a800a4cfdfbdc9ff4e5d4cce34af770f8a1f2b9416b161f294eb3703 \ + --hash=sha256:1401e73f0e8df62b4cfbf394e65a75f18b73bf8a94a6c5653a55bd6fdb8e1bc3 \ + --hash=sha256:1bb79cb06be9bb25a1bf8641bf34593f64a96b3ba66ebd8712954f647d9faa24 \ + --hash=sha256:21a3cedb2276d635543a10a11c61f75c6e387110e23e90cdb6c6dd2e1f3c9453 \ + --hash=sha256:27884429b7fed371fe1fcbe829659c4a259463d0ecacb7891d800e4754b5f24c \ + --hash=sha256:4e40deb2cf2cb403dbaf65209d49c45462ebbb1bff290d4c18b902b5b385cdc9 \ + --hash=sha256:6eef6881abf9b858020ffd23f4e5d77423329da2d4a1bc0af6613c2f698c369a \ + --hash=sha256:7fcf7f3812dd173d39273e99fb2abb0814be6133e7a721baa424cbcfd25b483b \ + --hash=sha256:8d45295757f66d1913e5917c06f1974745adad842403d419362491939be889a6 \ + --hash=sha256:8e36b8e07595fc6216d01e729c81a0b4ff029a93cc2ef987a73d3b650d6d559c \ + --hash=sha256:9822fa4db0d8d50abf5eebe081c01666a98120455090d0b71463d01d5d4153c1 \ + --hash=sha256:9e28141883c0aa8525ad5418e519d8791b7dd75f35020d3b1457db89346c5dc8 \ + --hash=sha256:a5920ff4d114025c51d3f925130ca3b0fad277631846b1109347c24948b29159 \ + --hash=sha256:be46b37b569e3c8ffb7d78022bcc0eadeb987109f709c1cec01b00c261ed9595 \ + --hash=sha256:cf7d554656bb8c5b7710300e04d86ab5137ebdd31fe309d66860a9d474b385f8 \ + --hash=sha256:d16ae6b97eb77f478dfe51d6eb3627048d3f47bd04282d3006e6a212e541dba0 \ + --hash=sha256:e32137ba8202b1291e879e8145113bfb543fcc992b5f043852a96d803788b83c # via tox-uv 
-virtualenv==20.26.3 \
-    --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \
-    --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589
+virtualenv==20.26.5 \
+    --hash=sha256:4f3ac17b81fba3ce3bd6f4ead2749a72da5929c01774948e243db9ba41df4ff6 \
+    --hash=sha256:ce489cac131aa58f4b25e321d6d186171f78e6cb13fafbf32a840cee67733ff4
     # via tox
diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py
index ab1df52d87..bc15ff58c3 100644
--- a/src/phalanx/models/environments.py
+++ b/src/phalanx/models/environments.py
@@ -23,12 +23,14 @@ from .secrets import Secret
 
 
 __all__ = [
+    "ArgoCDDetails",
     "ControlSystemConfig",
     "Environment",
     "EnvironmentBaseConfig",
     "EnvironmentConfig",
     "EnvironmentDetails",
     "GCPMetadata",
+    "GafaelfawrDetails",
     "GafaelfawrGitHubGroup",
     "GafaelfawrGitHubTeam",
     "GafaelfawrScope",
@@ -467,6 +469,9 @@ class GafaelfawrDetails(BaseModel):
     provider_hostname: str | None = None
     """Hostname of upstream identity provider, if meaningful."""
 
+    comanage_hostname: str | None = None
+    """Hostname of COmanage instance, if COmanage is in use."""
+
     scopes: list[GafaelfawrScope] = []
     """Gafaelfawr scopes and their associated groups."""
 
diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py
index 99b4a62966..b66e01e49d 100644
--- a/src/phalanx/storage/config.py
+++ b/src/phalanx/storage/config.py
@@ -786,9 +786,14 @@ def _build_gafaelfawr_details(
 
         # Determine the upstream identity provider.
         provider_hostname = None
+        comanage_hostname = None
         if gafaelfawr:
             if gafaelfawr.values["config"]["cilogon"]["clientId"]:
                 provider = IdentityProvider.CILOGON
+                cilogon_config = gafaelfawr.values["config"]["cilogon"]
+                if cilogon_config["enrollmentUrl"]:
+                    url = cilogon_config["enrollmentUrl"]
+                    comanage_hostname = urlparse(url).hostname
             elif gafaelfawr.values["config"]["github"]["clientId"]:
                 provider = IdentityProvider.GITHUB
             elif gafaelfawr.values["config"]["oidc"]["clientId"]:
@@ -828,6 +833,7 @@ def _build_gafaelfawr_details(
         return GafaelfawrDetails(
             provider=provider,
             provider_hostname=provider_hostname,
+            comanage_hostname=comanage_hostname,
             scopes=sorted(gafaelfawr_scopes, key=lambda s: s.scope),
         )
 
diff --git a/tox.ini b/tox.ini
index 87733c00bd..0a26029174 100644
--- a/tox.ini
+++ b/tox.ini
@@ -45,13 +45,6 @@ deps =
     pre-commit
 commands = pre-commit run --all-files
 
-[testenv:neophile-update]
-description = Run neophile to update dependencies
-skip_install = true
-deps =
-    neophile
-commands = neophile update {posargs}
-
 [testenv:phalanx-lint-change]
 description = Lint application chart changes determined by Git
 commands =